2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
9 * Copyright (C) 2000-2003 Broadcom Corporation.
12 #include <linux/config.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/if_vlan.h>
31 #include <linux/tcp.h>
32 #include <linux/workqueue.h>
34 #include <net/checksum.h>
36 #include <asm/system.h>
38 #include <asm/byteorder.h>
39 #include <asm/uaccess.h>
42 #include <asm/idprom.h>
43 #include <asm/oplib.h>
/* Compile-time driver configuration and constants.
 * NOTE(review): extracted view — gaps in the embedded numbering show that
 * #else/#endif lines and several continuation lines are elided here.
 */
47 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48 #define TG3_VLAN_TAG_USED 1
50 #define TG3_VLAN_TAG_USED 0
54 #define TG3_TSO_SUPPORT 1
56 #define TG3_TSO_SUPPORT 0
61 #define DRV_MODULE_NAME "tg3"
62 #define PFX DRV_MODULE_NAME ": "
63 #define DRV_MODULE_VERSION "3.23"
64 #define DRV_MODULE_RELDATE "February 15, 2005"
66 #define TG3_DEF_MAC_MODE 0
67 #define TG3_DEF_RX_MODE 0
68 #define TG3_DEF_TX_MODE 0
69 #define TG3_DEF_MSG_ENABLE \
79 /* length of time before we decide the hardware is borked,
80 * and dev->tx_timeout() should be called to fix the problem
82 #define TG3_TX_TIMEOUT (5 * HZ)
84 /* hardware minimum and maximum for a single frame's data payload */
85 #define TG3_MIN_MTU 60
/* 5705/5750 are limited to standard frames; all others allow jumbo (9000). */
86 #define TG3_MAX_MTU(tp) \
87 ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
88 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91 * You can't change the ring sizes, but you can change where you place
92 * them in the NIC onboard memory.
94 #define TG3_RX_RING_SIZE 512
95 #define TG3_DEF_RX_RING_PENDING 200
96 #define TG3_RX_JUMBO_RING_SIZE 256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
99 /* Do not place this n-ring entries value into the tp struct itself,
100 * we really want to expose these constants to GCC so that modulo et
101 * al. operations are done with shifts and masks instead of with
102 * hw multiply/modulo instructions. Another solution would be to
103 * replace things like '% foo' with '& (foo - 1)'.
105 #define TG3_RX_RCB_RING_SIZE(tp) \
106 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
110 #define TG3_TX_RING_SIZE 512
111 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116 TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118 TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 #define TX_RING_GAP(TP) \
122 (TG3_TX_RING_SIZE - (TP)->tx_pending)
/* Free TX descriptors, accounting for producer/consumer wrap-around. */
123 #define TX_BUFFS_AVAIL(TP) \
124 (((TP)->tx_cons <= (TP)->tx_prod) ? \
125 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
126 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
/* Ring size is a power of two, so advance with mask instead of modulo. */
127 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
129 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138 static char version[] __devinitdata =
139 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_VERSION(DRV_MODULE_VERSION);
/* Bitmap of NETIF_MSG_* categories; -1 selects the driver default. */
146 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
147 module_param(tg3_debug, int, 0);
148 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150 static struct pci_device_id tg3_pci_tbl[] = {
151 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names for the ETHTOOL_GSTATS statistics, one per u64 in
 * struct tg3_ethtool_stats (order must match that struct).
 * NOTE(review): extracted view — the opening "static struct {" line and
 * several entries (gaps in the embedded numbering) are elided here.
 */
237 const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
241 { "rx_ucast_packets" },
242 { "rx_mcast_packets" },
243 { "rx_bcast_packets" },
245 { "rx_align_errors" },
246 { "rx_xon_pause_rcvd" },
247 { "rx_xoff_pause_rcvd" },
248 { "rx_mac_ctrl_rcvd" },
249 { "rx_xoff_entered" },
250 { "rx_frame_too_long_errors" },
252 { "rx_undersize_packets" },
253 { "rx_in_length_errors" },
254 { "rx_out_length_errors" },
255 { "rx_64_or_less_octet_packets" },
256 { "rx_65_to_127_octet_packets" },
257 { "rx_128_to_255_octet_packets" },
258 { "rx_256_to_511_octet_packets" },
259 { "rx_512_to_1023_octet_packets" },
260 { "rx_1024_to_1522_octet_packets" },
261 { "rx_1523_to_2047_octet_packets" },
262 { "rx_2048_to_4095_octet_packets" },
263 { "rx_4096_to_8191_octet_packets" },
264 { "rx_8192_to_9022_octet_packets" },
271 { "tx_flow_control" },
273 { "tx_single_collisions" },
274 { "tx_mult_collisions" },
276 { "tx_excessive_collisions" },
277 { "tx_late_collisions" },
278 { "tx_collide_2times" },
279 { "tx_collide_3times" },
280 { "tx_collide_4times" },
281 { "tx_collide_5times" },
282 { "tx_collide_6times" },
283 { "tx_collide_7times" },
284 { "tx_collide_8times" },
285 { "tx_collide_9times" },
286 { "tx_collide_10times" },
287 { "tx_collide_11times" },
288 { "tx_collide_12times" },
289 { "tx_collide_13times" },
290 { "tx_collide_14times" },
291 { "tx_collide_15times" },
292 { "tx_ucast_packets" },
293 { "tx_mcast_packets" },
294 { "tx_bcast_packets" },
295 { "tx_carrier_sense_errors" },
299 { "dma_writeq_full" },
300 { "dma_write_prioq_full" },
304 { "rx_threshold_hit" },
306 { "dma_readq_full" },
307 { "dma_read_prioq_full" },
308 { "tx_comp_queue_full" },
310 { "ring_set_send_prod_index" },
311 { "ring_status_update" },
313 { "nic_avoided_irqs" },
314 { "nic_tx_threshold_hit" }
/* Write a 32-bit chip register. On chips with the PCIX target h/w bug the
 * write goes indirectly through PCI config space (REG_BASE_ADDR/REG_DATA)
 * under indirect_lock; otherwise it is a direct MMIO writel.
 * NOTE(review): extracted view — braces, the else line and the local
 * `flags` declaration are elided (gaps in the embedded numbering).
 */
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
319 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
322 spin_lock_irqsave(&tp->indirect_lock, flags);
323 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325 spin_unlock_irqrestore(&tp->indirect_lock, flags);
327 writel(val, tp->regs + off);
/* 5701 write bug workaround: read back to force the posted write out. */
328 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329 readl(tp->regs + off);
/* Register write that always flushes the posted PCI write before returning
 * (backs the tw32_f() macro). Uses the same config-space indirection as
 * tg3_write_indirect_reg32() on PCIX-target-bug chips.
 * NOTE(review): extracted view — the MMIO writel to `dest` (original line
 * ~344) and the surrounding braces are elided.
 */
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
335 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
338 spin_lock_irqsave(&tp->indirect_lock, flags);
339 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341 spin_unlock_irqrestore(&tp->indirect_lock, flags);
343 void __iomem *dest = tp->regs + off;
345 readl(dest); /* always flush PCI write */
/* RX mailbox write; re-reads the mailbox when the chip needs a flush to
 * guard against mailbox write reordering.
 * NOTE(review): extracted view — the writel/readl lines are elided.
 */
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
351 void __iomem *mbox = tp->regs + off;
353 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* TX mailbox write; the TXD_MBOX_HWBUG flag triggers an extra write and
 * MBOX_WRITE_REORDER a flush read (both workaround paths).
 * NOTE(review): extracted view — the writel/readl lines are elided.
 */
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
359 void __iomem *mbox = tp->regs + off;
361 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
363 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Register accessor shorthands. All capture `tp` from the enclosing scope:
 * tw32/tw32_f route through the workaround-aware writers above, the
 * tw16/tw8/tr* variants are plain MMIO accesses of the given width.
 */
367 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
371 #define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg) readl(tp->regs + (reg))
376 #define tr16(reg) readw(tp->regs + (reg))
377 #define tr8(reg) readb(tp->regs + (reg))
/* Write a word of NIC on-board SRAM through the config-space memory window
 * (MEM_WIN_BASE_ADDR/MEM_WIN_DATA), serialized by indirect_lock. The window
 * base is restored to zero afterwards.
 */
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
383 spin_lock_irqsave(&tp->indirect_lock, flags);
384 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
387 /* Always leave this as zero. */
388 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word of NIC on-board SRAM through the config-space memory window;
 * mirror of tg3_write_mem(), result returned via *val.
 */
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
396 spin_lock_irqsave(&tp->indirect_lock, flags);
397 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
400 /* Always leave this as zero. */
401 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mask chip interrupts: set MASK_PCI_INT in MISC_HOST_CTRL and write 1 to
 * the interrupt mailbox; the trailing tr32() flushes the posted write.
 */
405 static void tg3_disable_ints(struct tg3 *tp)
407 tw32(TG3PCI_MISC_HOST_CTRL,
408 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* If the status block says an update is pending, kick GRC_LCLCTRL_SETINT
 * so the chip raises an interrupt we would otherwise have missed.
 */
413 static inline void tg3_cond_int(struct tg3 *tp)
415 if (tp->hw_status->status & SD_STATUS_UPDATED)
416 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Unmask chip interrupts (inverse of tg3_disable_ints): clear MASK_PCI_INT,
 * write 0 to the interrupt mailbox, and flush with a read.
 */
419 static void tg3_enable_ints(struct tg3 *tp)
421 tw32(TG3PCI_MISC_HOST_CTRL,
422 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* NOTE(review): the opening line of this comment (original ~429,
 * "tg3_restart_ints") is elided from this extracted view. */
430 * similar to tg3_enable_ints, but it can return without flushing the
431 * PIO write which reenables interrupts
433 static void tg3_restart_ints(struct tg3 *tp)
435 tw32(TG3PCI_MISC_HOST_CTRL,
436 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
/* Quiesce the network stack: stop NAPI polling, then stop the TX queue. */
443 static inline void tg3_netif_stop(struct tg3 *tp)
445 netif_poll_disable(tp->dev);
446 netif_tx_disable(tp->dev);
/* Resume the network stack: wake the TX queue and re-enable NAPI polling. */
449 static inline void tg3_netif_start(struct tg3 *tp)
451 netif_wake_queue(tp->dev);
452 /* NOTE: unconditional netif_wake_queue is only appropriate
453 * so long as all callers are assured to have free tx slots
454 * (such as after tg3_init_hw)
456 netif_poll_enable(tp->dev);
/* Switch the chip core clock (625 MHz core on 5705/5750, 44MHz/ALTCLK
 * staging on other chips), preserving CLKRUN bits, and cache the result in
 * tp->pci_clock_ctrl.
 * NOTE(review): extracted view — several lines (declarations, udelay()s,
 * closing braces) are elided; gaps in the embedded numbering.
 */
460 static void tg3_switch_clocks(struct tg3 *tp)
462 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
465 orig_clock_ctrl = clock_ctrl;
466 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467 CLOCK_CTRL_CLKRUN_OENABLE |
469 tp->pci_clock_ctrl = clock_ctrl;
471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
473 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
474 tw32_f(TG3PCI_CLOCK_CTRL,
475 clock_ctrl | CLOCK_CTRL_625_CORE);
478 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
479 tw32_f(TG3PCI_CLOCK_CTRL,
481 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
483 tw32_f(TG3PCI_CLOCK_CTRL,
484 clock_ctrl | (CLOCK_CTRL_ALTCLK));
487 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
/* Maximum busy-wait iterations for an MII management transaction. */
491 #define PHY_BUSY_LOOPS 5000
/* Read PHY register `reg` over the MII management interface (MAC_MI_COM).
 * Auto-polling is temporarily disabled while the frame is driven; the
 * result lands in *val. Polls MI_COM_BUSY up to PHY_BUSY_LOOPS times.
 * NOTE(review): extracted view — the function's braces, `ret/loops/frame_val`
 * declarations, udelay()s and the loop/return tail are elided.
 */
493 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
499 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
501 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
/* Build the MI frame: PHY address, register address, READ command. */
507 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
508 MI_COM_PHY_ADDR_MASK);
509 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
510 MI_COM_REG_ADDR_MASK);
511 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
513 tw32_f(MAC_MI_COM, frame_val);
515 loops = PHY_BUSY_LOOPS;
518 frame_val = tr32(MAC_MI_COM);
520 if ((frame_val & MI_COM_BUSY) == 0) {
522 frame_val = tr32(MAC_MI_COM);
530 *val = frame_val & MI_COM_DATA_MASK;
/* Restore auto-poll mode if it was enabled on entry. */
534 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
535 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write `val` to PHY register `reg` over the MII management interface;
 * mirror of tg3_readphy() with MI_COM_CMD_WRITE and the data folded into
 * the frame. Auto-poll is disabled around the transaction.
 * NOTE(review): extracted view — braces, declarations, udelay()s and the
 * busy-loop tail/return are elided.
 */
542 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
548 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
550 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
554 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
555 MI_COM_PHY_ADDR_MASK);
556 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
557 MI_COM_REG_ADDR_MASK);
558 frame_val |= (val & MI_COM_DATA_MASK);
559 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
561 tw32_f(MAC_MI_COM, frame_val);
563 loops = PHY_BUSY_LOOPS;
566 frame_val = tr32(MAC_MI_COM);
567 if ((frame_val & MI_COM_BUSY) == 0) {
569 frame_val = tr32(MAC_MI_COM);
579 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
580 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable the PHY "ethernet@wirespeed" feature (bits 15 and 4 of the AUX
 * control shadow selected by 0x7007) unless the chip flags forbid it.
 */
587 static void tg3_phy_set_wirespeed(struct tg3 *tp)
591 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
594 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
595 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
596 tg3_writephy(tp, MII_TG3_AUX_CTRL,
597 (val | (1 << 15) | (1 << 4)));
/* Reset the PHY by setting BMCR_RESET and polling BMCR until the bit
 * self-clears (or a timeout, handled in lines elided from this view).
 */
600 static int tg3_bmcr_reset(struct tg3 *tp)
605 /* OK, reset it, and poll the BMCR_RESET bit until it
606 * clears or we time out.
608 phy_control = BMCR_RESET;
609 err = tg3_writephy(tp, MII_BMCR, phy_control);
615 err = tg3_readphy(tp, MII_BMCR, &phy_control);
619 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until the DSP "macro busy" bit (0x1000) clears.
 * NOTE(review): loop header, timeout handling and returns are elided.
 */
631 static int tg3_wait_macro_done(struct tg3 *tp)
638 if (!tg3_readphy(tp, 0x16, &tmp32)) {
639 if ((tmp32 & 0x1000) == 0)
/* Write a fixed DSP test pattern into each of the 4 PHY TX channels, read
 * it back, and compare. On mismatch, writes the 0x4001/0x4005 sequence and
 * (in elided lines) flags that another PHY reset is needed via *resetp.
 * NOTE(review): extracted view — braces, loop bodies' error returns and the
 * final return are elided.
 */
649 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
651 static const u32 test_pat[4][6] = {
652 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
653 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
654 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
655 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
659 for (chan = 0; chan < 4; chan++) {
/* Select this channel's DSP block and enter write mode (0x0002). */
662 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
663 (chan * 0x2000) | 0x0200);
664 tg3_writephy(tp, 0x16, 0x0002);
666 for (i = 0; i < 6; i++)
667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
670 tg3_writephy(tp, 0x16, 0x0202);
671 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and read the pattern back (0x0082/0x0802). */
676 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
677 (chan * 0x2000) | 0x0200);
678 tg3_writephy(tp, 0x16, 0x0082);
679 if (tg3_wait_macro_done(tp)) {
684 tg3_writephy(tp, 0x16, 0x0802);
685 if (tg3_wait_macro_done(tp)) {
690 for (i = 0; i < 6; i += 2) {
693 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
694 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
695 tg3_wait_macro_done(tp)) {
701 if (low != test_pat[chan][i] ||
702 high != test_pat[chan][i+1]) {
703 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
704 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
705 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero the DSP test pattern on all 4 PHY channels (6 writes of 0x000 each),
 * waiting for the DSP macro to finish per channel.
 */
715 static int tg3_phy_reset_chanpat(struct tg3 *tp)
719 for (chan = 0; chan < 4; chan++) {
722 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
723 (chan * 0x2000) | 0x0200);
724 tg3_writephy(tp, 0x16, 0x0002);
725 for (i = 0; i < 6; i++)
726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
727 tg3_writephy(tp, 0x16, 0x0202);
728 if (tg3_wait_macro_done(tp))
/* Extended PHY reset procedure for 5703/5704/5705: force 1000FD master
 * mode, run the DSP test-pattern check (retrying with a BMCR reset as
 * needed), clear the channel patterns, then restore MII_TG3_CTRL and the
 * transmitter/interrupt state.
 * NOTE(review): extracted view — retry-loop framing, error returns and
 * closing braces are elided (gaps in the embedded numbering).
 */
735 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
737 u32 reg32, phy9_orig;
738 int retries, do_phy_reset, err;
744 err = tg3_bmcr_reset(tp);
750 /* Disable transmitter and interrupt. */
751 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
755 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
757 /* Set full-duplex, 1000 mbps. */
758 tg3_writephy(tp, MII_BMCR,
759 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
761 /* Set to master mode. */
762 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
765 tg3_writephy(tp, MII_TG3_CTRL,
766 (MII_TG3_CTRL_AS_MASTER |
767 MII_TG3_CTRL_ENABLE_AS_MASTER))
769 /* Enable SM_DSP_CLOCK and 6dB. */
770 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
772 /* Block the PHY control access. */
773 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
774 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
776 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
781 err = tg3_phy_reset_chanpat(tp);
785 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
786 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
788 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
789 tg3_writephy(tp, 0x16, 0x0000);
791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
793 /* Set Extended packet length bit for jumbo frames */
794 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
797 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* Restore caller's MII_TG3_CTRL and re-enable transmitter/interrupt. */
800 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
802 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
804 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
811 /* This will reset the tigon3 PHY if there is no valid
812 * link unless the FORCE argument is non-zero.
814 static int tg3_phy_reset(struct tg3 *tp)
/* BMSR is latched; read twice so the second read reflects current state. */
819 err = tg3_readphy(tp, MII_BMSR, &phy_status);
820 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
/* 5703/5704/5705 need the extended DSP reset procedure. */
824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
826 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
827 err = tg3_phy_reset_5703_4_5(tp);
833 err = tg3_bmcr_reset(tp);
/* Per-errata DSP tweaks, keyed off the PHY bug flags probed earlier. */
838 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
839 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
840 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
841 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
842 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
843 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
844 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
846 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
847 tg3_writephy(tp, 0x1c, 0x8d68);
848 tg3_writephy(tp, 0x1c, 0x8d68);
850 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
851 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
852 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
853 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
854 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
855 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
856 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
857 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
858 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
860 /* Set Extended packet length bit (bit 14) on all chips that */
861 /* support jumbo frames */
862 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
863 /* Cannot do read-modify-write on 5401 */
864 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
865 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
866 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
869 /* Set bit 14 with read-modify-write to preserve other bits */
870 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
871 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
872 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
874 tg3_phy_set_wirespeed(tp);
/* Configure the GPIO-controlled auxiliary (Vaux) power switch for WOL.
 * On 5704 the two ports share the switch, so the peer device's flags are
 * consulted via pdev_peer. The GPIO OE/OUTPUT sequencing differs per ASIC
 * revision, and GPIO2 is skipped on 5753 variants.
 * NOTE(review): extracted view — branch framing, udelay()s between the
 * staged GPIO writes, and closing braces are elided.
 */
878 static void tg3_frob_aux_power(struct tg3 *tp)
880 struct tg3 *tp_peer = tp;
882 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
885 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
886 tp_peer = pci_get_drvdata(tp->pdev_peer);
/* Either port wanting WOL keeps aux power switched on. */
892 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
893 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
896 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
897 (GRC_LCLCTRL_GPIO_OE0 |
898 GRC_LCLCTRL_GPIO_OE1 |
899 GRC_LCLCTRL_GPIO_OE2 |
900 GRC_LCLCTRL_GPIO_OUTPUT0 |
901 GRC_LCLCTRL_GPIO_OUTPUT1));
908 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
911 /* On 5753 and variants, GPIO2 cannot be used. */
912 no_gpio2 = tp->nic_sram_data_cfg &
913 NIC_SRAM_DATA_CFG_NO_GPIO2;
915 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
916 GRC_LCLCTRL_GPIO_OE1 |
917 GRC_LCLCTRL_GPIO_OE2 |
918 GRC_LCLCTRL_GPIO_OUTPUT1 |
919 GRC_LCLCTRL_GPIO_OUTPUT2;
921 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
922 GRC_LCLCTRL_GPIO_OUTPUT2);
924 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
928 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
930 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
936 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
/* No WOL on either port: drive GPIO1 to cut aux power (non-5700/5701). */
942 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
943 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
945 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
948 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
949 (GRC_LCLCTRL_GPIO_OE1 |
950 GRC_LCLCTRL_GPIO_OUTPUT1));
953 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
954 (GRC_LCLCTRL_GPIO_OE1));
957 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958 (GRC_LCLCTRL_GPIO_OE1 |
959 GRC_LCLCTRL_GPIO_OUTPUT1));
/* Forward declarations and reset-kind codes shared by the power/reset
 * paths below (passed to tg3_write_sig_post_reset()).
 */
965 static int tg3_setup_phy(struct tg3 *, int);
967 #define RESET_KIND_SHUTDOWN 0
968 #define RESET_KIND_INIT 1
969 #define RESET_KIND_SUSPEND 2
971 static void tg3_write_sig_post_reset(struct tg3 *, int);
/* Transition the chip to the requested PCI power state: force the link to
 * 10/half, arm WOL (magic-packet mode) if enabled, stage the clock-control
 * bits down, switch aux power, then write PCI_PM_CTRL and post the
 * shutdown signature.
 * NOTE(review): extracted view — the switch(state) framing, udelay()s,
 * several declarations and closing braces are elided (gaps in the
 * embedded numbering).
 */
973 static int tg3_set_power_state(struct tg3 *tp, int state)
976 u16 power_control, power_caps;
979 /* Make sure register accesses (indirect or otherwise)
980 * will function correctly.
982 pci_write_config_dword(tp->pdev,
983 TG3PCI_MISC_HOST_CTRL,
986 pci_read_config_word(tp->pdev,
989 power_control |= PCI_PM_CTRL_PME_STATUS;
990 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
994 pci_write_config_word(tp->pdev,
997 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1015 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1017 tp->dev->name, state);
1021 power_control |= PCI_PM_CTRL_PME_ENABLE;
1023 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1024 tw32(TG3PCI_MISC_HOST_CTRL,
1025 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Save the current link config so resume can restore it. */
1027 if (tp->link_config.phy_is_low_power == 0) {
1028 tp->link_config.phy_is_low_power = 1;
1029 tp->link_config.orig_speed = tp->link_config.speed;
1030 tp->link_config.orig_duplex = tp->link_config.duplex;
1031 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1034 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1035 tp->link_config.speed = SPEED_10;
1036 tp->link_config.duplex = DUPLEX_HALF;
1037 tp->link_config.autoneg = AUTONEG_ENABLE;
1038 tg3_setup_phy(tp, 0);
1041 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1043 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1046 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1047 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1050 mac_mode = MAC_MODE_PORT_MODE_MII;
1052 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1053 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1054 mac_mode |= MAC_MODE_LINK_POLARITY;
1056 mac_mode = MAC_MODE_PORT_MODE_TBI;
1059 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1060 tw32(MAC_LED_CTRL, tp->led_ctrl);
1062 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1063 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1064 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1066 tw32_f(MAC_MODE, mac_mode);
1069 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1073 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1074 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1078 base_val = tp->pci_clock_ctrl;
1079 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1080 CLOCK_CTRL_TXCLK_DISABLE);
1082 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1084 CLOCK_CTRL_PWRDOWN_PLL133);
/* FIX(review): was `== 5750`, comparing the decoded ASIC rev against a raw
 * literal; every other test in this file uses ASIC_REV_5750 (e.g. the
 * MAC_LED_CTRL and newbits checks above/below), so the ASF exclusion could
 * never match. Corrected to the symbolic constant.
 */
1086 } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1087 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1088 u32 newbits1, newbits2;
1090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1092 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1093 CLOCK_CTRL_TXCLK_DISABLE |
1095 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1096 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1098 newbits1 = CLOCK_CTRL_625_CORE;
1099 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1101 newbits1 = CLOCK_CTRL_ALTCLK;
1102 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1105 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1108 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1111 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1112 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1117 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1118 CLOCK_CTRL_TXCLK_DISABLE |
1119 CLOCK_CTRL_44MHZ_CORE);
1121 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1124 tw32_f(TG3PCI_CLOCK_CTRL,
1125 tp->pci_clock_ctrl | newbits3);
1130 tg3_frob_aux_power(tp);
1132 /* Finally, set the new power state. */
1133 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1135 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Log the current carrier state: either "Link is down" or the negotiated
 * speed/duplex plus the active TX/RX flow-control settings.
 */
1140 static void tg3_link_report(struct tg3 *tp)
1142 if (!netif_carrier_ok(tp->dev)) {
1143 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1145 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1147 (tp->link_config.active_speed == SPEED_1000 ?
1149 (tp->link_config.active_speed == SPEED_100 ?
1151 (tp->link_config.active_duplex == DUPLEX_FULL ?
1154 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1157 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1158 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve 802.3x pause from our advertisement (local_adv) and the link
 * partner's ability (remote_adv) per the standard pause-resolution table,
 * then apply the result to MAC_RX_MODE/MAC_TX_MODE (only rewriting a mode
 * register when its value actually changed).
 * NOTE(review): extracted view — some else branches and closing braces are
 * elided (gaps in the embedded numbering).
 */
1162 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1164 u32 new_tg3_flags = 0;
1165 u32 old_rx_mode = tp->rx_mode;
1166 u32 old_tx_mode = tp->tx_mode;
1168 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1169 if (local_adv & ADVERTISE_PAUSE_CAP) {
1170 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1171 if (remote_adv & LPA_PAUSE_CAP)
1173 (TG3_FLAG_RX_PAUSE |
1175 else if (remote_adv & LPA_PAUSE_ASYM)
1177 (TG3_FLAG_RX_PAUSE);
1179 if (remote_adv & LPA_PAUSE_CAP)
1181 (TG3_FLAG_RX_PAUSE |
1184 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1185 if ((remote_adv & LPA_PAUSE_CAP) &&
1186 (remote_adv & LPA_PAUSE_ASYM))
1187 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1190 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1191 tp->tg3_flags |= new_tg3_flags;
/* Autoneg of pause disabled: keep whatever flags are already set. */
1193 new_tg3_flags = tp->tg3_flags;
1196 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1197 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1199 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1201 if (old_rx_mode != tp->rx_mode) {
1202 tw32_f(MAC_RX_MODE, tp->rx_mode);
1205 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1206 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1208 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1210 if (old_tx_mode != tp->tx_mode) {
1211 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Decode the PHY AUX status speed/duplex field into SPEED_*/DUPLEX_* values
 * via the out-parameters; unknown codes yield SPEED_INVALID/DUPLEX_INVALID.
 * NOTE(review): extracted view — the *speed assignments for the 10/100
 * cases and the break statements are elided.
 */
1215 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1217 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1218 case MII_TG3_AUX_STAT_10HALF:
1220 *duplex = DUPLEX_HALF;
1223 case MII_TG3_AUX_STAT_10FULL:
1225 *duplex = DUPLEX_FULL;
1228 case MII_TG3_AUX_STAT_100HALF:
1230 *duplex = DUPLEX_HALF;
1233 case MII_TG3_AUX_STAT_100FULL:
1235 *duplex = DUPLEX_FULL;
1238 case MII_TG3_AUX_STAT_1000HALF:
1239 *speed = SPEED_1000;
1240 *duplex = DUPLEX_HALF;
1243 case MII_TG3_AUX_STAT_1000FULL:
1244 *speed = SPEED_1000;
1245 *duplex = DUPLEX_FULL;
1249 *speed = SPEED_INVALID;
1250 *duplex = DUPLEX_INVALID;
1255 static void tg3_phy_copper_begin(struct tg3 *tp)
1260 if (tp->link_config.phy_is_low_power) {
1261 /* Entering low power mode. Disable gigabit and
1262 * 100baseT advertisements.
1264 tg3_writephy(tp, MII_TG3_CTRL, 0);
1266 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1267 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1268 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1269 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1271 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1272 } else if (tp->link_config.speed == SPEED_INVALID) {
1273 tp->link_config.advertising =
1274 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1275 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1276 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1277 ADVERTISED_Autoneg | ADVERTISED_MII);
1279 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1280 tp->link_config.advertising &=
1281 ~(ADVERTISED_1000baseT_Half |
1282 ADVERTISED_1000baseT_Full);
1284 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1285 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1286 new_adv |= ADVERTISE_10HALF;
1287 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1288 new_adv |= ADVERTISE_10FULL;
1289 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1290 new_adv |= ADVERTISE_100HALF;
1291 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1292 new_adv |= ADVERTISE_100FULL;
1293 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1295 if (tp->link_config.advertising &
1296 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1298 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1299 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1300 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1301 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1302 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1303 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1304 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1305 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1306 MII_TG3_CTRL_ENABLE_AS_MASTER);
1307 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1309 tg3_writephy(tp, MII_TG3_CTRL, 0);
1312 /* Asking for a specific link mode. */
1313 if (tp->link_config.speed == SPEED_1000) {
1314 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1315 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1317 if (tp->link_config.duplex == DUPLEX_FULL)
1318 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1320 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1321 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1322 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1323 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1324 MII_TG3_CTRL_ENABLE_AS_MASTER);
1325 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1327 tg3_writephy(tp, MII_TG3_CTRL, 0);
1329 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1330 if (tp->link_config.speed == SPEED_100) {
1331 if (tp->link_config.duplex == DUPLEX_FULL)
1332 new_adv |= ADVERTISE_100FULL;
1334 new_adv |= ADVERTISE_100HALF;
1336 if (tp->link_config.duplex == DUPLEX_FULL)
1337 new_adv |= ADVERTISE_10FULL;
1339 new_adv |= ADVERTISE_10HALF;
1341 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1345 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1346 tp->link_config.speed != SPEED_INVALID) {
1347 u32 bmcr, orig_bmcr;
1349 tp->link_config.active_speed = tp->link_config.speed;
1350 tp->link_config.active_duplex = tp->link_config.duplex;
1353 switch (tp->link_config.speed) {
1359 bmcr |= BMCR_SPEED100;
1363 bmcr |= TG3_BMCR_SPEED1000;
1367 if (tp->link_config.duplex == DUPLEX_FULL)
1368 bmcr |= BMCR_FULLDPLX;
1370 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1371 (bmcr != orig_bmcr)) {
1372 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1373 for (i = 0; i < 1500; i++) {
1377 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1378 tg3_readphy(tp, MII_BMSR, &tmp))
1380 if (!(tmp & BMSR_LSTATUS)) {
1385 tg3_writephy(tp, MII_BMCR, bmcr);
1389 tg3_writephy(tp, MII_BMCR,
1390 BMCR_ANENABLE | BMCR_ANRESTART);
1394 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1398 /* Turn off tap power management. */
1399 /* Set Extended packet length bit */
1400 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1402 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1403 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1405 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1406 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1408 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1409 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1411 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1412 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1414 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1415 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1422 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1424 u32 adv_reg, all_mask;
1426 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1429 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1430 ADVERTISE_100HALF | ADVERTISE_100FULL);
1431 if ((adv_reg & all_mask) != all_mask)
1433 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1436 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1439 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1440 MII_TG3_CTRL_ADV_1000_FULL);
1441 if ((tg3_ctrl & all_mask) != all_mask)
1447 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1449 int current_link_up;
1458 (MAC_STATUS_SYNC_CHANGED |
1459 MAC_STATUS_CFG_CHANGED |
1460 MAC_STATUS_MI_COMPLETION |
1461 MAC_STATUS_LNKSTATE_CHANGED));
1464 tp->mi_mode = MAC_MI_MODE_BASE;
1465 tw32_f(MAC_MI_MODE, tp->mi_mode);
1468 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1470 /* Some third-party PHYs need to be reset on link going
1473 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1476 netif_carrier_ok(tp->dev)) {
1477 tg3_readphy(tp, MII_BMSR, &bmsr);
1478 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1479 !(bmsr & BMSR_LSTATUS))
1485 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1486 tg3_readphy(tp, MII_BMSR, &bmsr);
1487 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1488 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1491 if (!(bmsr & BMSR_LSTATUS)) {
1492 err = tg3_init_5401phy_dsp(tp);
1496 tg3_readphy(tp, MII_BMSR, &bmsr);
1497 for (i = 0; i < 1000; i++) {
1499 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1500 (bmsr & BMSR_LSTATUS)) {
1506 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1507 !(bmsr & BMSR_LSTATUS) &&
1508 tp->link_config.active_speed == SPEED_1000) {
1509 err = tg3_phy_reset(tp);
1511 err = tg3_init_5401phy_dsp(tp);
1516 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1517 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1518 /* 5701 {A0,B0} CRC bug workaround */
1519 tg3_writephy(tp, 0x15, 0x0a75);
1520 tg3_writephy(tp, 0x1c, 0x8c68);
1521 tg3_writephy(tp, 0x1c, 0x8d68);
1522 tg3_writephy(tp, 0x1c, 0x8c68);
1525 /* Clear pending interrupts... */
1526 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1527 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1529 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1530 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1532 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1534 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1535 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1536 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1537 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1538 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1540 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1543 current_link_up = 0;
1544 current_speed = SPEED_INVALID;
1545 current_duplex = DUPLEX_INVALID;
1547 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1550 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1551 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1552 if (!(val & (1 << 10))) {
1554 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1560 for (i = 0; i < 100; i++) {
1561 tg3_readphy(tp, MII_BMSR, &bmsr);
1562 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1563 (bmsr & BMSR_LSTATUS))
1568 if (bmsr & BMSR_LSTATUS) {
1571 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1572 for (i = 0; i < 2000; i++) {
1574 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1579 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1584 for (i = 0; i < 200; i++) {
1585 tg3_readphy(tp, MII_BMCR, &bmcr);
1586 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1588 if (bmcr && bmcr != 0x7fff)
1593 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1594 if (bmcr & BMCR_ANENABLE) {
1595 current_link_up = 1;
1597 /* Force autoneg restart if we are exiting
1600 if (!tg3_copper_is_advertising_all(tp))
1601 current_link_up = 0;
1603 current_link_up = 0;
1606 if (!(bmcr & BMCR_ANENABLE) &&
1607 tp->link_config.speed == current_speed &&
1608 tp->link_config.duplex == current_duplex) {
1609 current_link_up = 1;
1611 current_link_up = 0;
1615 tp->link_config.active_speed = current_speed;
1616 tp->link_config.active_duplex = current_duplex;
1619 if (current_link_up == 1 &&
1620 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1621 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1622 u32 local_adv, remote_adv;
1624 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1626 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1628 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1631 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1633 /* If we are not advertising full pause capability,
1634 * something is wrong. Bring the link down and reconfigure.
1636 if (local_adv != ADVERTISE_PAUSE_CAP) {
1637 current_link_up = 0;
1639 tg3_setup_flow_control(tp, local_adv, remote_adv);
1643 if (current_link_up == 0) {
1646 tg3_phy_copper_begin(tp);
1648 tg3_readphy(tp, MII_BMSR, &tmp);
1649 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1650 (tmp & BMSR_LSTATUS))
1651 current_link_up = 1;
1654 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1655 if (current_link_up == 1) {
1656 if (tp->link_config.active_speed == SPEED_100 ||
1657 tp->link_config.active_speed == SPEED_10)
1658 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1660 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1662 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1664 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1665 if (tp->link_config.active_duplex == DUPLEX_HALF)
1666 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1668 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1670 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1671 (current_link_up == 1 &&
1672 tp->link_config.active_speed == SPEED_10))
1673 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1675 if (current_link_up == 1)
1676 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1679 /* ??? Without this setting Netgear GA302T PHY does not
1680 * ??? send/receive packets...
1682 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1683 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1684 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1685 tw32_f(MAC_MI_MODE, tp->mi_mode);
1689 tw32_f(MAC_MODE, tp->mac_mode);
1692 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1693 /* Polled via timer. */
1694 tw32_f(MAC_EVENT, 0);
1696 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1701 current_link_up == 1 &&
1702 tp->link_config.active_speed == SPEED_1000 &&
1703 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1704 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1707 (MAC_STATUS_SYNC_CHANGED |
1708 MAC_STATUS_CFG_CHANGED));
1711 NIC_SRAM_FIRMWARE_MBOX,
1712 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1715 if (current_link_up != netif_carrier_ok(tp->dev)) {
1716 if (current_link_up)
1717 netif_carrier_on(tp->dev);
1719 netif_carrier_off(tp->dev);
1720 tg3_link_report(tp);
1726 struct tg3_fiber_aneginfo {
1728 #define ANEG_STATE_UNKNOWN 0
1729 #define ANEG_STATE_AN_ENABLE 1
1730 #define ANEG_STATE_RESTART_INIT 2
1731 #define ANEG_STATE_RESTART 3
1732 #define ANEG_STATE_DISABLE_LINK_OK 4
1733 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1734 #define ANEG_STATE_ABILITY_DETECT 6
1735 #define ANEG_STATE_ACK_DETECT_INIT 7
1736 #define ANEG_STATE_ACK_DETECT 8
1737 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1738 #define ANEG_STATE_COMPLETE_ACK 10
1739 #define ANEG_STATE_IDLE_DETECT_INIT 11
1740 #define ANEG_STATE_IDLE_DETECT 12
1741 #define ANEG_STATE_LINK_OK 13
1742 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1743 #define ANEG_STATE_NEXT_PAGE_WAIT 15
1746 #define MR_AN_ENABLE 0x00000001
1747 #define MR_RESTART_AN 0x00000002
1748 #define MR_AN_COMPLETE 0x00000004
1749 #define MR_PAGE_RX 0x00000008
1750 #define MR_NP_LOADED 0x00000010
1751 #define MR_TOGGLE_TX 0x00000020
1752 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1753 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1754 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1755 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1756 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1757 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1758 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1759 #define MR_TOGGLE_RX 0x00002000
1760 #define MR_NP_RX 0x00004000
1762 #define MR_LINK_OK 0x80000000
1764 unsigned long link_time, cur_time;
1766 u32 ability_match_cfg;
1767 int ability_match_count;
1769 char ability_match, idle_match, ack_match;
1771 u32 txconfig, rxconfig;
1772 #define ANEG_CFG_NP 0x00000080
1773 #define ANEG_CFG_ACK 0x00000040
1774 #define ANEG_CFG_RF2 0x00000020
1775 #define ANEG_CFG_RF1 0x00000010
1776 #define ANEG_CFG_PS2 0x00000001
1777 #define ANEG_CFG_PS1 0x00008000
1778 #define ANEG_CFG_HD 0x00004000
1779 #define ANEG_CFG_FD 0x00002000
1780 #define ANEG_CFG_INVAL 0x00001f06
1785 #define ANEG_TIMER_ENAB 2
1786 #define ANEG_FAILED -1
1788 #define ANEG_STATE_SETTLE_TIME 10000
1790 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1791 struct tg3_fiber_aneginfo *ap)
1793 unsigned long delta;
1797 if (ap->state == ANEG_STATE_UNKNOWN) {
1801 ap->ability_match_cfg = 0;
1802 ap->ability_match_count = 0;
1803 ap->ability_match = 0;
1809 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1810 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1812 if (rx_cfg_reg != ap->ability_match_cfg) {
1813 ap->ability_match_cfg = rx_cfg_reg;
1814 ap->ability_match = 0;
1815 ap->ability_match_count = 0;
1817 if (++ap->ability_match_count > 1) {
1818 ap->ability_match = 1;
1819 ap->ability_match_cfg = rx_cfg_reg;
1822 if (rx_cfg_reg & ANEG_CFG_ACK)
1830 ap->ability_match_cfg = 0;
1831 ap->ability_match_count = 0;
1832 ap->ability_match = 0;
1838 ap->rxconfig = rx_cfg_reg;
1842 case ANEG_STATE_UNKNOWN:
1843 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1844 ap->state = ANEG_STATE_AN_ENABLE;
1847 case ANEG_STATE_AN_ENABLE:
1848 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1849 if (ap->flags & MR_AN_ENABLE) {
1852 ap->ability_match_cfg = 0;
1853 ap->ability_match_count = 0;
1854 ap->ability_match = 0;
1858 ap->state = ANEG_STATE_RESTART_INIT;
1860 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1864 case ANEG_STATE_RESTART_INIT:
1865 ap->link_time = ap->cur_time;
1866 ap->flags &= ~(MR_NP_LOADED);
1868 tw32(MAC_TX_AUTO_NEG, 0);
1869 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1870 tw32_f(MAC_MODE, tp->mac_mode);
1873 ret = ANEG_TIMER_ENAB;
1874 ap->state = ANEG_STATE_RESTART;
1877 case ANEG_STATE_RESTART:
1878 delta = ap->cur_time - ap->link_time;
1879 if (delta > ANEG_STATE_SETTLE_TIME) {
1880 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1882 ret = ANEG_TIMER_ENAB;
1886 case ANEG_STATE_DISABLE_LINK_OK:
1890 case ANEG_STATE_ABILITY_DETECT_INIT:
1891 ap->flags &= ~(MR_TOGGLE_TX);
1892 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1893 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1894 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1895 tw32_f(MAC_MODE, tp->mac_mode);
1898 ap->state = ANEG_STATE_ABILITY_DETECT;
1901 case ANEG_STATE_ABILITY_DETECT:
1902 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1903 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1907 case ANEG_STATE_ACK_DETECT_INIT:
1908 ap->txconfig |= ANEG_CFG_ACK;
1909 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1910 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1911 tw32_f(MAC_MODE, tp->mac_mode);
1914 ap->state = ANEG_STATE_ACK_DETECT;
1917 case ANEG_STATE_ACK_DETECT:
1918 if (ap->ack_match != 0) {
1919 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1920 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1921 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1923 ap->state = ANEG_STATE_AN_ENABLE;
1925 } else if (ap->ability_match != 0 &&
1926 ap->rxconfig == 0) {
1927 ap->state = ANEG_STATE_AN_ENABLE;
1931 case ANEG_STATE_COMPLETE_ACK_INIT:
1932 if (ap->rxconfig & ANEG_CFG_INVAL) {
1936 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1937 MR_LP_ADV_HALF_DUPLEX |
1938 MR_LP_ADV_SYM_PAUSE |
1939 MR_LP_ADV_ASYM_PAUSE |
1940 MR_LP_ADV_REMOTE_FAULT1 |
1941 MR_LP_ADV_REMOTE_FAULT2 |
1942 MR_LP_ADV_NEXT_PAGE |
1945 if (ap->rxconfig & ANEG_CFG_FD)
1946 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1947 if (ap->rxconfig & ANEG_CFG_HD)
1948 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1949 if (ap->rxconfig & ANEG_CFG_PS1)
1950 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1951 if (ap->rxconfig & ANEG_CFG_PS2)
1952 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1953 if (ap->rxconfig & ANEG_CFG_RF1)
1954 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1955 if (ap->rxconfig & ANEG_CFG_RF2)
1956 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1957 if (ap->rxconfig & ANEG_CFG_NP)
1958 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1960 ap->link_time = ap->cur_time;
1962 ap->flags ^= (MR_TOGGLE_TX);
1963 if (ap->rxconfig & 0x0008)
1964 ap->flags |= MR_TOGGLE_RX;
1965 if (ap->rxconfig & ANEG_CFG_NP)
1966 ap->flags |= MR_NP_RX;
1967 ap->flags |= MR_PAGE_RX;
1969 ap->state = ANEG_STATE_COMPLETE_ACK;
1970 ret = ANEG_TIMER_ENAB;
1973 case ANEG_STATE_COMPLETE_ACK:
1974 if (ap->ability_match != 0 &&
1975 ap->rxconfig == 0) {
1976 ap->state = ANEG_STATE_AN_ENABLE;
1979 delta = ap->cur_time - ap->link_time;
1980 if (delta > ANEG_STATE_SETTLE_TIME) {
1981 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1982 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1984 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1985 !(ap->flags & MR_NP_RX)) {
1986 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1994 case ANEG_STATE_IDLE_DETECT_INIT:
1995 ap->link_time = ap->cur_time;
1996 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1997 tw32_f(MAC_MODE, tp->mac_mode);
2000 ap->state = ANEG_STATE_IDLE_DETECT;
2001 ret = ANEG_TIMER_ENAB;
2004 case ANEG_STATE_IDLE_DETECT:
2005 if (ap->ability_match != 0 &&
2006 ap->rxconfig == 0) {
2007 ap->state = ANEG_STATE_AN_ENABLE;
2010 delta = ap->cur_time - ap->link_time;
2011 if (delta > ANEG_STATE_SETTLE_TIME) {
2012 /* XXX another gem from the Broadcom driver :( */
2013 ap->state = ANEG_STATE_LINK_OK;
2017 case ANEG_STATE_LINK_OK:
2018 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2022 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2023 /* ??? unimplemented */
2026 case ANEG_STATE_NEXT_PAGE_WAIT:
2027 /* ??? unimplemented */
2038 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2041 struct tg3_fiber_aneginfo aninfo;
2042 int status = ANEG_FAILED;
2046 tw32_f(MAC_TX_AUTO_NEG, 0);
2048 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2049 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2052 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2055 memset(&aninfo, 0, sizeof(aninfo));
2056 aninfo.flags |= MR_AN_ENABLE;
2057 aninfo.state = ANEG_STATE_UNKNOWN;
2058 aninfo.cur_time = 0;
2060 while (++tick < 195000) {
2061 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2062 if (status == ANEG_DONE || status == ANEG_FAILED)
2068 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2069 tw32_f(MAC_MODE, tp->mac_mode);
2072 *flags = aninfo.flags;
2074 if (status == ANEG_DONE &&
2075 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2076 MR_LP_ADV_FULL_DUPLEX)))
2082 static void tg3_init_bcm8002(struct tg3 *tp)
2084 u32 mac_status = tr32(MAC_STATUS);
2087 /* Reset when initting first time or we have a link. */
2088 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2089 !(mac_status & MAC_STATUS_PCS_SYNCED))
2092 /* Set PLL lock range. */
2093 tg3_writephy(tp, 0x16, 0x8007);
2096 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2098 /* Wait for reset to complete. */
2099 /* XXX schedule_timeout() ... */
2100 for (i = 0; i < 500; i++)
2103 /* Config mode; select PMA/Ch 1 regs. */
2104 tg3_writephy(tp, 0x10, 0x8411);
2106 /* Enable auto-lock and comdet, select txclk for tx. */
2107 tg3_writephy(tp, 0x11, 0x0a10);
2109 tg3_writephy(tp, 0x18, 0x00a0);
2110 tg3_writephy(tp, 0x16, 0x41ff);
2112 /* Assert and deassert POR. */
2113 tg3_writephy(tp, 0x13, 0x0400);
2115 tg3_writephy(tp, 0x13, 0x0000);
2117 tg3_writephy(tp, 0x11, 0x0a50);
2119 tg3_writephy(tp, 0x11, 0x0a10);
2121 /* Wait for signal to stabilize */
2122 /* XXX schedule_timeout() ... */
2123 for (i = 0; i < 15000; i++)
2126 /* Deselect the channel register so we can read the PHYID
2129 tg3_writephy(tp, 0x10, 0x8011);
2132 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2134 u32 sg_dig_ctrl, sg_dig_status;
2135 u32 serdes_cfg, expected_sg_dig_ctrl;
2136 int workaround, port_a;
2137 int current_link_up;
2140 expected_sg_dig_ctrl = 0;
2143 current_link_up = 0;
2145 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2146 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2148 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2151 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2152 /* preserve bits 20-23 for voltage regulator */
2153 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2156 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2158 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2159 if (sg_dig_ctrl & (1 << 31)) {
2161 u32 val = serdes_cfg;
2167 tw32_f(MAC_SERDES_CFG, val);
2169 tw32_f(SG_DIG_CTRL, 0x01388400);
2171 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2172 tg3_setup_flow_control(tp, 0, 0);
2173 current_link_up = 1;
2178 /* Want auto-negotiation. */
2179 expected_sg_dig_ctrl = 0x81388400;
2181 /* Pause capability */
2182 expected_sg_dig_ctrl |= (1 << 11);
2184 /* Asymettric pause */
2185 expected_sg_dig_ctrl |= (1 << 12);
2187 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2189 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2190 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2192 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2194 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2195 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2196 MAC_STATUS_SIGNAL_DET)) {
2199 /* Giver time to negotiate (~200ms) */
2200 for (i = 0; i < 40000; i++) {
2201 sg_dig_status = tr32(SG_DIG_STATUS);
2202 if (sg_dig_status & (0x3))
2206 mac_status = tr32(MAC_STATUS);
2208 if ((sg_dig_status & (1 << 1)) &&
2209 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2210 u32 local_adv, remote_adv;
2212 local_adv = ADVERTISE_PAUSE_CAP;
2214 if (sg_dig_status & (1 << 19))
2215 remote_adv |= LPA_PAUSE_CAP;
2216 if (sg_dig_status & (1 << 20))
2217 remote_adv |= LPA_PAUSE_ASYM;
2219 tg3_setup_flow_control(tp, local_adv, remote_adv);
2220 current_link_up = 1;
2221 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2222 } else if (!(sg_dig_status & (1 << 1))) {
2223 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2224 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2227 u32 val = serdes_cfg;
2234 tw32_f(MAC_SERDES_CFG, val);
2237 tw32_f(SG_DIG_CTRL, 0x01388400);
2240 /* Link parallel detection - link is up */
2241 /* only if we have PCS_SYNC and not */
2242 /* receiving config code words */
2243 mac_status = tr32(MAC_STATUS);
2244 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2245 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2246 tg3_setup_flow_control(tp, 0, 0);
2247 current_link_up = 1;
2254 return current_link_up;
2257 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2259 int current_link_up = 0;
2261 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2262 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2266 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2270 if (fiber_autoneg(tp, &flags)) {
2271 u32 local_adv, remote_adv;
2273 local_adv = ADVERTISE_PAUSE_CAP;
2275 if (flags & MR_LP_ADV_SYM_PAUSE)
2276 remote_adv |= LPA_PAUSE_CAP;
2277 if (flags & MR_LP_ADV_ASYM_PAUSE)
2278 remote_adv |= LPA_PAUSE_ASYM;
2280 tg3_setup_flow_control(tp, local_adv, remote_adv);
2282 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2283 current_link_up = 1;
2285 for (i = 0; i < 30; i++) {
2288 (MAC_STATUS_SYNC_CHANGED |
2289 MAC_STATUS_CFG_CHANGED));
2291 if ((tr32(MAC_STATUS) &
2292 (MAC_STATUS_SYNC_CHANGED |
2293 MAC_STATUS_CFG_CHANGED)) == 0)
2297 mac_status = tr32(MAC_STATUS);
2298 if (current_link_up == 0 &&
2299 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2300 !(mac_status & MAC_STATUS_RCVD_CFG))
2301 current_link_up = 1;
2303 /* Forcing 1000FD link up. */
2304 current_link_up = 1;
2305 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2307 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2312 return current_link_up;
2315 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2318 u16 orig_active_speed;
2319 u8 orig_active_duplex;
2321 int current_link_up;
2325 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2326 TG3_FLAG_TX_PAUSE));
2327 orig_active_speed = tp->link_config.active_speed;
2328 orig_active_duplex = tp->link_config.active_duplex;
2330 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2331 netif_carrier_ok(tp->dev) &&
2332 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2333 mac_status = tr32(MAC_STATUS);
2334 mac_status &= (MAC_STATUS_PCS_SYNCED |
2335 MAC_STATUS_SIGNAL_DET |
2336 MAC_STATUS_CFG_CHANGED |
2337 MAC_STATUS_RCVD_CFG);
2338 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2339 MAC_STATUS_SIGNAL_DET)) {
2340 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2341 MAC_STATUS_CFG_CHANGED));
2346 tw32_f(MAC_TX_AUTO_NEG, 0);
2348 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2349 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2350 tw32_f(MAC_MODE, tp->mac_mode);
2353 if (tp->phy_id == PHY_ID_BCM8002)
2354 tg3_init_bcm8002(tp);
2356 /* Enable link change event even when serdes polling. */
2357 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2360 current_link_up = 0;
2361 mac_status = tr32(MAC_STATUS);
2363 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2364 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2366 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2368 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2369 tw32_f(MAC_MODE, tp->mac_mode);
2372 tp->hw_status->status =
2373 (SD_STATUS_UPDATED |
2374 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2376 for (i = 0; i < 100; i++) {
2377 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2378 MAC_STATUS_CFG_CHANGED));
2380 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2381 MAC_STATUS_CFG_CHANGED)) == 0)
2385 mac_status = tr32(MAC_STATUS);
2386 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2387 current_link_up = 0;
2388 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2389 tw32_f(MAC_MODE, (tp->mac_mode |
2390 MAC_MODE_SEND_CONFIGS));
2392 tw32_f(MAC_MODE, tp->mac_mode);
2396 if (current_link_up == 1) {
2397 tp->link_config.active_speed = SPEED_1000;
2398 tp->link_config.active_duplex = DUPLEX_FULL;
2399 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2400 LED_CTRL_LNKLED_OVERRIDE |
2401 LED_CTRL_1000MBPS_ON));
2403 tp->link_config.active_speed = SPEED_INVALID;
2404 tp->link_config.active_duplex = DUPLEX_INVALID;
2405 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2406 LED_CTRL_LNKLED_OVERRIDE |
2407 LED_CTRL_TRAFFIC_OVERRIDE));
2410 if (current_link_up != netif_carrier_ok(tp->dev)) {
2411 if (current_link_up)
2412 netif_carrier_on(tp->dev);
2414 netif_carrier_off(tp->dev);
2415 tg3_link_report(tp);
2418 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2420 if (orig_pause_cfg != now_pause_cfg ||
2421 orig_active_speed != tp->link_config.active_speed ||
2422 orig_active_duplex != tp->link_config.active_duplex)
2423 tg3_link_report(tp);
2429 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2433 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2434 err = tg3_setup_fiber_phy(tp, force_reset);
2436 err = tg3_setup_copper_phy(tp, force_reset);
2439 if (tp->link_config.active_speed == SPEED_1000 &&
2440 tp->link_config.active_duplex == DUPLEX_HALF)
2441 tw32(MAC_TX_LENGTHS,
2442 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2443 (6 << TX_LENGTHS_IPG_SHIFT) |
2444 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2446 tw32(MAC_TX_LENGTHS,
2447 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2448 (6 << TX_LENGTHS_IPG_SHIFT) |
2449 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2451 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2452 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2453 if (netif_carrier_ok(tp->dev)) {
2454 tw32(HOSTCC_STAT_COAL_TICKS,
2455 DEFAULT_STAT_COAL_TICKS);
2457 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2464 /* Tigon3 never reports partial packet sends. So we do not
2465 * need special logic to handle SKBs that have not had all
2466 * of their frags sent yet, like SunGEM does.
2468 static void tg3_tx(struct tg3 *tp)
2470 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2471 u32 sw_idx = tp->tx_cons;
2473 while (sw_idx != hw_idx) {
2474 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2475 struct sk_buff *skb = ri->skb;
2478 if (unlikely(skb == NULL))
2481 pci_unmap_single(tp->pdev,
2482 pci_unmap_addr(ri, mapping),
2488 sw_idx = NEXT_TX(sw_idx);
2490 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2491 if (unlikely(sw_idx == hw_idx))
2494 ri = &tp->tx_buffers[sw_idx];
2495 if (unlikely(ri->skb != NULL))
2498 pci_unmap_page(tp->pdev,
2499 pci_unmap_addr(ri, mapping),
2500 skb_shinfo(skb)->frags[i].size,
2503 sw_idx = NEXT_TX(sw_idx);
2506 dev_kfree_skb_irq(skb);
2509 tp->tx_cons = sw_idx;
2511 if (netif_queue_stopped(tp->dev) &&
2512 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2513 netif_wake_queue(tp->dev);
2516 /* Returns size of skb allocated or < 0 on error.
2518 * We only need to fill in the address because the other members
2519 * of the RX descriptor are invariant, see tg3_init_rings.
2521 * Note the purposeful assymetry of cpu vs. chip accesses. For
2522 * posting buffers we only dirty the first cache line of the RX
2523 * descriptor (containing the address). Whereas for the RX status
2524 * buffers the cpu only reads the last cacheline of the RX descriptor
2525 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2527 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2528 int src_idx, u32 dest_idx_unmasked)
2530 struct tg3_rx_buffer_desc *desc;
2531 struct ring_info *map, *src_map;
2532 struct sk_buff *skb;
2534 int skb_size, dest_idx;
2537 switch (opaque_key) {
2538 case RXD_OPAQUE_RING_STD:
2539 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2540 desc = &tp->rx_std[dest_idx];
2541 map = &tp->rx_std_buffers[dest_idx];
2543 src_map = &tp->rx_std_buffers[src_idx];
2544 skb_size = RX_PKT_BUF_SZ;
2547 case RXD_OPAQUE_RING_JUMBO:
2548 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2549 desc = &tp->rx_jumbo[dest_idx];
2550 map = &tp->rx_jumbo_buffers[dest_idx];
2552 src_map = &tp->rx_jumbo_buffers[src_idx];
2553 skb_size = RX_JUMBO_PKT_BUF_SZ;
2560 /* Do not overwrite any of the map or rp information
2561 * until we are sure we can commit to a new buffer.
2563 * Callers depend upon this behavior and assume that
2564 * we leave everything unchanged if we fail.
2566 skb = dev_alloc_skb(skb_size);
2571 skb_reserve(skb, tp->rx_offset);
2573 mapping = pci_map_single(tp->pdev, skb->data,
2574 skb_size - tp->rx_offset,
2575 PCI_DMA_FROMDEVICE);
2578 pci_unmap_addr_set(map, mapping, mapping);
2580 if (src_map != NULL)
2581 src_map->skb = NULL;
2583 desc->addr_hi = ((u64)mapping >> 32);
2584 desc->addr_lo = ((u64)mapping & 0xffffffff);
2589 /* We only need to move over in the address because the other
2590 * members of the RX descriptor are invariant. See notes above
2591 * tg3_alloc_rx_skb for full details.
2593 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2594 int src_idx, u32 dest_idx_unmasked)
2596 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2597 struct ring_info *src_map, *dest_map;
2600 switch (opaque_key) {
2601 case RXD_OPAQUE_RING_STD:
2602 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2603 dest_desc = &tp->rx_std[dest_idx];
2604 dest_map = &tp->rx_std_buffers[dest_idx];
2605 src_desc = &tp->rx_std[src_idx];
2606 src_map = &tp->rx_std_buffers[src_idx];
2609 case RXD_OPAQUE_RING_JUMBO:
2610 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2611 dest_desc = &tp->rx_jumbo[dest_idx];
2612 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2613 src_desc = &tp->rx_jumbo[src_idx];
2614 src_map = &tp->rx_jumbo_buffers[src_idx];
2621 dest_map->skb = src_map->skb;
2622 pci_unmap_addr_set(dest_map, mapping,
2623 pci_unmap_addr(src_map, mapping));
2624 dest_desc->addr_hi = src_desc->addr_hi;
2625 dest_desc->addr_lo = src_desc->addr_lo;
2627 src_map->skb = NULL;
2630 #if TG3_VLAN_TAG_USED
2631 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2633 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2637 /* The RX ring scheme is composed of multiple rings which post fresh
2638 * buffers to the chip, and one special ring the chip uses to report
2639 * status back to the host.
2641 * The special ring reports the status of received packets to the
2642 * host. The chip does not write into the original descriptor the
2643 * RX buffer was obtained from. The chip simply takes the original
2644 * descriptor as provided by the host, updates the status and length
2645 * field, then writes this into the next status ring entry.
2647 * Each ring the host uses to post buffers to the chip is described
2648 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2649 * it is first placed into the on-chip ram. When the packet's length
2650 * is known, it walks down the TG3_BDINFO entries to select the ring.
2651 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2652 * which is within the range of the new packet's length is chosen.
2654 * The "separate ring for rx status" scheme may sound queer, but it makes
2655 * sense from a cache coherency perspective. If only the host writes
2656 * to the buffer post rings, and only the chip writes to the rx status
2657 * rings, then cache lines never move beyond shared-modified state.
2658 * If both the host and chip were to write into the same ring, cache line
2659 * eviction could occur since both entities want it in an exclusive state.
/* Service the rx return (status) ring: walk entries from our saved
 * consumer position up to the hardware's producer index, delivering at
 * most 'budget' completed packets to the stack (NAPI), then ACK the
 * status ring and re-post consumed buffers to the producer ring(s).
 * Returns the number of packets processed.
 */
static int tg3_rx(struct tg3 *tp, int budget)
	u32 rx_rcb_ptr = tp->rx_rcb_ptr;
	hw_idx = tp->hw_status->idx[0].rx_producer;
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		/* The chip echoes back our opaque cookie: ring id + the
		 * index of the producer-ring slot the buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
			/* Unknown ring id: skip without posting a buffer. */
			goto next_pkt_nopost;
		/* Remember which producer ring(s) need refilling below. */
		work_mask |= opaque_key;
		/* Drop errored frames (except the harmless odd-nibble MII
		 * indication) and recycle their buffer in place.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
		/* Large frame: hand the original buffer up and allocate a
		 * replacement for the ring.
		 */
		if (len > RX_COPY_THRESHOLD
		    && tp->rx_offset == 2
		    /* rx_offset != 2 iff this is a 5701 card running
		     * in PCI-X mode [see tg3_get_invariants()] */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);
			/* Small frame: copy into a fresh skb and keep the
			 * original DMA buffer on the ring (recycled above).
			 */
			struct sk_buff *copy_skb;
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;
			copy_skb->dev = tp->dev;
			/* 2-byte reserve aligns the IP header. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			/* We'll reuse the original ring buffer. */
		/* Trust the chip's TCP/UDP checksum only when rx
		 * checksumming is enabled and the csum field is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		/* VLAN-tagged frames go through the accel path instead of
		 * plain netif_receive_skb().
		 */
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		netif_receive_skb(skb);
		tp->dev->last_rx = jiffies;
		sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
	/* ACK the status ring. */
	tp->rx_rcb_ptr = rx_rcb_ptr;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
		     (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/* NAPI poll callback.  Handles link-change events, TX completion (under
 * tx_lock nested inside tp->lock), then runs the RX path bounded by the
 * smaller of *budget and the device quota.  Returns 0 when all work is
 * done (and interrupts have been re-enabled), 1 to stay on the poll list.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	spin_lock_irqsave(&tp->lock, flags);
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set
			 * before reconfiguring the PHY.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			tg3_setup_phy(tp, 0);
	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		spin_lock(&tp->tx_lock);
		spin_unlock(&tp->tx_lock);
	spin_unlock_irqrestore(&tp->lock, flags);
	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		/* Never exceed the per-device quota. */
		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;
		work_done = tg3_rx(tp, orig_budget);
		*budget -= work_done;
		netdev->quota -= work_done;
		/* Budget exhausted: more work remains, stay scheduled. */
		if (work_done >= orig_budget)
	/* if no more work, tell net stack and NIC we're done */
		spin_lock_irqsave(&tp->lock, flags);
		__netif_rx_complete(netdev);
		tg3_restart_ints(tp);
		spin_unlock_irqrestore(&tp->lock, flags);
	return (done ? 0 : 1);
/* Return nonzero if the status block indicates anything for the driver
 * to do: a pending link-change event (when link changes are reported via
 * the status block rather than a register/serdes poll) or unprocessed
 * TX completions / RX packets.
 */
static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;
	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
/* Hard interrupt handler.  If the status block was updated, mask chip
 * interrupts via the interrupt mailbox and schedule NAPI polling;
 * otherwise treat it as a shared-IRQ spurious call.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;
	spin_lock_irqsave(&tp->lock, flags);
	if (sblk->status & SD_STATUS_UPDATED) {
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		 * Flush PCI write. This also guarantees that our
		 * status block has been flushed to host memory.
		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(dev, tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
			/* no work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
	} else {	/* shared interrupt */
	spin_unlock_irqrestore(&tp->lock, flags);
	return IRQ_RETVAL(handled);
2931 static int tg3_init_hw(struct tg3 *);
2932 static int tg3_halt(struct tg3 *);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: simulate an interrupt so netconsole/netpoll can make
 * progress with normal interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
	tg3_interrupt(dev->irq, dev, NULL);
/* Deferred (workqueue) full reset of the device, scheduled from
 * tg3_tx_timeout().  Runs under tp->lock and tp->tx_lock, then restarts
 * the interface and, if the RESTART_TIMER flag was pending, re-arms the
 * driver timer immediately.
 */
static void tg3_reset_task(void *_data)
	struct tg3 *tp = _data;
	unsigned int restart_timer;
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	/* Sample and clear the restart-timer request atomically under
	 * the locks.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
	tg3_netif_start(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
		mod_timer(&tp->timer, jiffies + 1);
/* dev->tx_timeout handler: log the stall and defer the actual chip
 * reset to the workqueue (tg3_reset_task), since we may not sleep here.
 */
static void tg3_tx_timeout(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	schedule_work(&tp->reset_task);
2976 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
/* Workaround for the 4GB-boundary DMA hardware bug: linearize the skb
 * into a freshly allocated copy (which gets a new, hopefully safe, DMA
 * mapping), re-emit it as a single descriptor, and unmap/clear the sw
 * ring entries [*start, last_plus_one) that described the old skb.
 */
static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 guilty_entry, int guilty_len,
				       u32 last_plus_one, u32 *start, u32 mss)
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr;
	/* New SKB is guaranteed to be linear. */
	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
	tg3_set_txd(tp, entry, new_addr, new_skb->len,
		    (skb->ip_summed == CHECKSUM_HW) ?
		    TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
	*start = NEXT_TX(entry);
	/* Now clean up the sw ring entries. */
	while (entry != last_plus_one) {
		/* First entry is the linear head, the rest are page frags. */
		len = skb_headlen(skb);
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
			/* The replacement skb is owned by its ring slot... */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
			/* ...all other old slots are released. */
			tp->tx_buffers[entry].skb = NULL;
		entry = NEXT_TX(entry);
/* Fill one tx descriptor.  mss_and_is_end packs the MSS in bits 31:1
 * and an "is last fragment" flag in bit 0 (see callers); the VLAN tag,
 * when TXD_FLAG_VLAN is set, rides in the upper 16 bits of 'flags'.
 */
static void tg3_set_txd(struct tg3 *tp, int entry,
			dma_addr_t mapping, int len, u32 flags,
	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
	/* MSS shares the vlan_tag word, above the tag bits. */
	vlan_tag |= (mss << TXD_MSS_SHIFT);
	/* Split the 64-bit DMA address across the two address words. */
	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3050 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3052 u32 base = (u32) mapping & 0xffffffff;
3054 return ((base > 0xffffdcc0) &&
3055 (base + len + 8 < base));
/* hard_start_xmit: map the skb (head + page frags) into tx descriptors,
 * handle TSO/checksum/VLAN flags, apply the 4GB-boundary hardware bug
 * workaround when needed, then ring the tx producer mailbox.  Uses a
 * trylock on tx_lock (LLTX-style) and returns NETDEV_TX_LOCKED on
 * contention, NETDEV_TX_BUSY if the ring is unexpectedly full, or
 * NETDEV_TX_OK on success.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	unsigned long flags;
	len = skb_headlen(skb);
	/* No BH disabling for tx_lock here.  We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Rejoice!
	 * Actually, things are not so simple.  If we are to take a hw
	 * IRQ here, we can deadlock, consider:
	 *        spin on tp->tx_lock
	 * So we really do need to disable interrupts when taking
	local_irq_save(flags);
	if (!spin_trylock(&tp->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&tp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		return NETDEV_TX_BUSY;
	entry = tp->tx_prod;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	/* TSO path: compute pseudo-header checksum state and encode the
	 * MSS (plus header-length bits) for the hardware.
	 */
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;
		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO computes the TCP checksum itself. */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
			~csum_tcpudp_magic(skb->nh.iph->saddr,
		/* IP options / TCP options lengths must be encoded
		 * differently depending on ASIC generation.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
	would_hit_hwbug = 0;
	/* Record (entry index + 1) of any fragment crossing 4GB, so 0
	 * means "no bug hit".
	 */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = entry + 1;
	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
	entry = NEXT_TX(entry);
	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			mapping = pci_map_page(tp->pdev,
					       len, PCI_DMA_TODEVICE);
			/* Only the head slot owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
			if (tg3_4g_overflow_test(mapping, len)) {
				/* Only one should match. */
				if (would_hit_hwbug)
				would_hit_hwbug = entry + 1;
			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));
			entry = NEXT_TX(entry);
	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		unsigned int len = 0;
		/* Walk back to the head descriptor of this skb and find
		 * the guilty fragment's length for the workaround.
		 */
		would_hit_hwbug -= 1;
		entry = entry - 1 - skb_shinfo(skb)->nr_frags;
		entry &= (TG3_TX_RING_SIZE - 1);
		while (entry != last_plus_one) {
				len = skb_headlen(skb);
				len = skb_shinfo(skb)->frags[i-1].size;
			if (entry == would_hit_hwbug)
			entry = NEXT_TX(entry);
		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		if (tigon3_4gb_hwbug_workaround(tp, skb,
	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
	tp->tx_prod = entry;
	/* Stop the queue before the ring can't hold a max-frag skb. */
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&tp->tx_lock, flags);
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
/* Apply a new MTU to the software state, toggling jumbo-frame support
 * according to whether it exceeds the standard Ethernet payload size.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
	if (new_mtu > ETH_DATA_LEN)
		tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
		tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
/* dev->change_mtu handler: validate the requested MTU against the
 * chip-dependent limits, and if the interface is running, reconfigure
 * the hardware under tp->lock/tx_lock; otherwise just record the value.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
	struct tg3 *tp = netdev_priv(dev);
	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		tg3_set_mtu(dev, tp, new_mtu);
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tg3_set_mtu(dev, tp, new_mtu);
	tg3_netif_start(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
3307 /* Free up pending packets in all rx/tx rings.
3309 * The chip has been shut down and the driver detached from
3310 * the networking, so no interrupts or new tx packets will
3311 * end up in the driver. tp->{tx,}lock is not held and we are not
3312 * in an interrupt context and thus may sleep.
/* Release every skb still held by the std/jumbo rx rings and the tx
 * ring, unmapping each buffer's DMA mapping first.  See the comment
 * above: called with the chip shut down, no locks held, may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
	struct ring_info *rxp;
	/* Standard-size rx buffers. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];
		if (rxp->skb == NULL)
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
	/* Jumbo rx buffers. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];
		if (rxp->skb == NULL)
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
	/* Tx ring: each skb may span several slots (head + frags), so the
	 * inner loop unmaps each fragment slot before freeing the skb.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		txp = &tp->tx_buffers[i];
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
		dev_kfree_skb_any(skb);
3379 /* Initialize tx/rx rings for packet processing.
3381 * The chip has been shut down and the driver detached from
3382 * the networking, so no interrupts or new tx packets will
3383 * end up in the driver. tp->{tx,}lock are held and thus
/* (Re)initialize all rings: free old skbs, zero the descriptor memory,
 * set up the invariant fields of the rx producer descriptors (length,
 * flags, opaque cookie), then allocate fresh rx skbs up to the
 * configured pending counts.  See the comment above: chip is down,
 * tp->{tx,}lock held.
 */
static void tg3_init_rings(struct tg3 *tp)
	/* Free up all the SKBs. */
	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;
		rxd = &tp->rx_std[i];
		rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* Cookie encodes ring id + slot index, echoed back by
		 * the chip in the status ring (see tg3_rx).
		 */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;
			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3445 * Must not be invoked with interrupt sources disabled and
3446 * the hardware shutdown down.
/* Free all DMA-consistent ring/status/stats memory and the kmalloc'd
 * ring_info bookkeeping arrays, NULLing the pointers so a later call is
 * safe.  See the comment above: chip must be shut down first.
 */
static void tg3_free_consistent(struct tg3 *tp)
	/* One kmalloc covers std + jumbo + tx bookkeeping (see
	 * tg3_alloc_consistent), so only rx_std_buffers is freed.
	 */
	if (tp->rx_std_buffers) {
		kfree(tp->rx_std_buffers);
		tp->rx_std_buffers = NULL;
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
3487 * Must not be invoked with interrupt sources disabled and
3488 * the hardware shutdown down. Can sleep.
/* Allocate all ring bookkeeping (one kmalloc sliced into std/jumbo rx
 * and tx info arrays) plus the DMA-consistent descriptor rings, status
 * block and statistics block.  On any failure everything already
 * allocated is released via tg3_free_consistent.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
	/* Single allocation for all three bookkeeping arrays. */
	tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
				      TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
	if (!tp->rx_std_buffers)
	memset(tp->rx_std_buffers, 0,
	       (sizeof(struct ring_info) *
		TG3_RX_JUMBO_RING_SIZE)) +
	       (sizeof(struct tx_ring_info) *
	/* Carve the jumbo and tx arrays out of the same allocation. */
	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);
	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     &tp->status_mapping);
	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	/* Status and stats blocks start out zeroed. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	tg3_free_consistent(tp);
3555 #define MAX_WAIT_CNT 1000
3557 /* To stop a block, clear the enable bit and poll till it
3558 * clears. tp->lock is held.
/* Stop one chip block: clear its enable bit and poll (up to
 * MAX_WAIT_CNT iterations) for the bit to read back clear.  On
 * 5705/5750 some blocks cannot be toggled and are reported as success.
 * tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		/* We can't enable/disable these bits of the
		 * 5705/5750, just say success.
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if ((val & enable_bit) == 0)
	if (i == MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
/* tp->lock is held. */
/* Orderly shutdown of the MAC and all DMA/queue blocks: disable
 * interrupts and the rx path, stop the receive-side blocks, then the
 * send-side blocks, the MAC TX engine, host coalescing and memory
 * blocks, reset the FTQ, and finally clear the status/stats blocks.
 * Errors from individual block stops are OR-ed into the return value.
 */
static int tg3_abort_hw(struct tg3 *tp)
	tg3_disable_ints(tp);
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	/* Receive-side blocks first so no new packets flow in. */
	err  = tg3_stop_block(tp, RCVBDI_MODE,   RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE,   RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE,   RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE,  RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE,   RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE,    RCVCC_MODE_ENABLE);
	/* Then the send side. */
	err |= tg3_stop_block(tp, SNDBDS_MODE,   SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE,   SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE,    RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, DMAC_MODE,     DMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE,   SNDBDC_MODE_ENABLE);
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);
	/* Poll for the MAC TX engine to actually stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
	err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE,  WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
	/* Pulse the flow-through-queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* tp->lock is held. */
/* Acquire the NVRAM software arbitration grant (SWARB) by requesting
 * SET1 and polling for GNT1; a no-op on chips without NVRAM support.
 */
static int tg3_nvram_lock(struct tg3 *tp)
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		tw32(NVRAM_SWARB, SWARB_REQ_SET1);
		for (i = 0; i < 8000; i++) {
			if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* tp->lock is held. */
/* Release the NVRAM arbitration grant taken by tg3_nvram_lock(). */
static void tg3_nvram_unlock(struct tg3 *tp)
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
/* tp->lock is held. */
/* Before a chip reset: write the firmware-mailbox magic and, when the
 * new-style ASF handshake is in use, publish the driver state matching
 * the reset kind (init / shutdown / suspend) to the firmware.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* tp->lock is held. */
/* After a chip reset: with the new-style ASF handshake, tell the
 * firmware the corresponding start/unload phase has completed.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
/* tp->lock is held. */
/* Legacy ASF signalling: when ASF is enabled, publish the driver state
 * for the given reset kind via the firmware state mailbox.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3773 static void tg3_stop_fw(struct tg3 *);
/* tp->lock is held. */
/* Full chip core-clock reset, including the many chip-revision-specific
 * workarounds: temporarily disabling the 5701 register-write bug
 * workaround, PCI Express link retraining, restoring PCI config state,
 * waiting for on-chip firmware to come back, and re-probing ASF state.
 */
static int tg3_chip_reset(struct tg3 *tp)
	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	flags_save = tp->tg3_flags;
	tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
	val = GRC_MISC_CFG_CORECLK_RESET;
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* Magic register probe for PCIe variants; bit 29 quirk on
		 * non-A0 5750 silicon.  Register semantics undocumented
		 * here -- NOTE(review): from vendor errata, presumably.
		 */
		if (tr32(0x7e2c) == 0x60) {
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);
	/* restore 5701 hardware bug workaround flag */
	tp->tg3_flags = flags_save;
	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 * For most tg3 variants the trick below was working.
	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
		/* Wait for link training to complete. */
		for (i = 0; i < 5000; i++)
		/* Set-and-restore config reg 0xc4 bit 15 after retrain. */
		pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
		pci_write_config_dword(tp->pdev, 0xc4,
				       cfg_val | (1 << 15));
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);
	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
	pci_restore_state(tp->pdev);
	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
	tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tw32(0x5000, 0x400);
	tw32(GRC_MODE, tp->grc_mode);
	/* 5705 A0: set bit 15 of register 0xc4 (chip errata). */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);
		tw32(0xc4, val | (1 << 15));
	/* Mini-PCI 5705 needs CLKRUN output enabled (A0 also forced). */
	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
		tw32_f(MAC_MODE, 0);
	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
	    !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
		       "firmware will not restart magic=%08x\n",
		       tp->dev->name, val);
	/* PCIe (non-5750-A0): set bit 25 of register 0x7c00. */
	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);
		tw32(0x7c00, val | (1 << 25));
	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
/* tp->lock is held. */
/* With ASF enabled, ask on-chip firmware to pause: post PAUSE_FW in the
 * command mailbox, raise the RX CPU event, and briefly poll for the
 * CPU to acknowledge (event bit 14 clearing).
 */
static void tg3_stop_fw(struct tg3 *tp)
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		tw32(GRC_RX_CPU_EVENT, val);
		/* Wait for RX cpu to ACK the event. */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
/* tp->lock is held. */
/* Halt the device for shutdown: signal the firmware (pre-reset), reset
 * the chip, then emit the legacy and post-reset shutdown signatures.
 */
static int tg3_halt(struct tg3 *tp)
	tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
	err = tg3_chip_reset(tp);
	tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Memory layout of the embedded 5701-class firmware image that follows
 * (tg3FwText etc.): load addresses and section lengths in NIC address
 * space.  Note: "RELASE" typo below is preserved -- the macro name is
 * referenced elsewhere.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
/* tg3FwText: opaque MIPS machine-code words (the .text segment) of the
 * 5701 A0 fix firmware, written verbatim into RX/TX CPU scratch memory by
 * tg3_load_firmware_cpu().  Generated data -- never edit by hand.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this chunk (lines dropped during extraction).
 */
4002 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4003 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4004 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4005 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4006 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4007 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4008 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4009 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4010 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4011 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4012 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4013 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4014 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4015 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4016 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4017 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4018 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4019 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4020 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4021 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4022 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4023 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4024 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4025 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4026 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4027 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4029 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4030 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4031 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4032 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4033 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4034 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4035 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4036 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4037 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4038 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4039 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4040 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4041 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4042 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4043 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4044 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4045 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4046 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4047 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4048 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4049 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4050 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4051 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4052 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4053 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4054 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4055 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4056 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4057 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4058 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4059 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4060 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4061 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4062 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4063 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4064 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4065 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4066 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4067 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4068 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4069 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4070 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4071 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4072 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4073 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4074 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4075 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4076 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4077 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4078 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4079 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4080 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4081 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4082 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4083 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4084 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4085 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4086 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4087 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4088 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4089 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4090 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4091 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4092 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4093 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
/* tg3FwRodata: read-only data segment of the 5701 A0 fix firmware.  The
 * words are ASCII diagnostic strings (e.g. 0x66617461,0x6c457272 = "fatalErr",
 * 0x4d61696e,0x43707542 = "MainCpuB").  Generated data -- do not edit. */
4096 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4097 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4098 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4099 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4100 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
/* The firmware's initialized-data segment is all zeros, so it is compiled
 * out; tg3_load_5701_a0_firmware_fix() passes data_data = NULL and the
 * loader writes zeros for the whole TG3_FW_DATA_LEN range instead. */
4104 #if 0 /* All zeros, don't eat up space with it. */
4105 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4106 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4107 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* Base offsets and sizes (16 KB each) of the RX and TX CPU scratch
 * memories into which firmware segments are copied by
 * tg3_load_firmware_cpu(). */
4111 #define RX_CPU_SCRATCH_BASE 0x30000
4112 #define RX_CPU_SCRATCH_SIZE 0x04000
4113 #define TX_CPU_SCRATCH_BASE 0x34000
4114 #define TX_CPU_SCRATCH_SIZE 0x04000
4116 /* tp->lock is held. */
/*
 * tg3_halt_cpu - put the on-chip RX or TX MIPS CPU into the halted state.
 * @tp:     device instance (caller holds tp->lock)
 * @offset: register base of the CPU to stop (RX_CPU_BASE or TX_CPU_BASE)
 *
 * Repeatedly writes CPU_MODE_HALT (clearing CPU_STATE first) and polls
 * CPU_MODE until the halt bit reads back set, for up to 10000 iterations.
 * NOTE(review): several lines of this chunk are missing -- presumably the
 * TX/5705 check below returns early (the 5705 has no separate TX CPU) and
 * a timeout falls through to the printk at the bottom; confirm against the
 * full source.
 */
4117 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4121 if (offset == TX_CPU_BASE &&
4122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
/* RX CPU: poll-with-retry loop, then one final halt write below. */
4125 if (offset == RX_CPU_BASE) {
4126 for (i = 0; i < 10000; i++) {
4127 tw32(offset + CPU_STATE, 0xffffffff);
4128 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4129 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final halt write; tw32_f flushes the posted write. */
4133 tw32(offset + CPU_STATE, 0xffffffff);
4134 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* Non-RX (TX) CPU path: same halt-and-poll sequence. */
4137 for (i = 0; i < 10000; i++) {
4138 tw32(offset + CPU_STATE, 0xffffffff);
4139 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4140 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Timed out waiting for the halt bit: report which CPU failed. */
4146 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4149 (offset == RX_CPU_BASE ? "RX" : "TX"));
/* NOTE(review): these are members of struct fw_info (the struct header and
 * the u32 *text_data/rodata_data/data_data pointer members are on lines
 * dropped from this chunk).  Each *_base is the firmware-image address of a
 * segment and *_len its size in bytes; tg3_load_firmware_cpu() uses only
 * the low 16 bits of a base as an offset into CPU scratch memory. */
4156 unsigned int text_base;
4157 unsigned int text_len;
4159 unsigned int rodata_base;
4160 unsigned int rodata_len;
4162 unsigned int data_base;
4163 unsigned int data_len;
4167 /* tp->lock is held. */
/*
 * tg3_load_firmware_cpu - copy a firmware image into a CPU's scratch memory.
 * @tp:               device instance (caller holds tp->lock)
 * @cpu_base:         RX_CPU_BASE or TX_CPU_BASE
 * @cpu_scratch_base: start of that CPU's scratch area
 * @cpu_scratch_size: size of the scratch area in bytes
 * @info:             segment addresses/lengths and data pointers to load
 *
 * Halts the CPU, zeroes its scratch memory, then writes the text, rodata
 * and data segments word by word (a NULL segment pointer writes zeros).
 * Loading the TX CPU on a 5705 is rejected, since that chip has no TX CPU.
 */
4168 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4169 int cpu_scratch_size, struct fw_info *info)
4172 u32 orig_tg3_flags = tp->tg3_flags;
4173 void (*write_op)(struct tg3 *, u32, u32);
4175 if (cpu_base == TX_CPU_BASE &&
4176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4177 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4178 "TX cpu firmware on %s which is 5705.\n",
/* 5705 scratch is reached via memory writes; other chips use the
 * indirect register window. */
4183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4184 write_op = tg3_write_mem;
4186 write_op = tg3_write_indirect_reg32;
4188 /* Force use of PCI config space for indirect register
4191 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4193 err = tg3_halt_cpu(tp, cpu_base);
/* Clear the whole scratch area before loading. */
4197 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4198 write_op(tp, cpu_scratch_base + i, 0);
/* Keep the CPU halted while its image is written. */
4199 tw32(cpu_base + CPU_STATE, 0xffffffff);
4200 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* Text segment: low 16 bits of text_base locate it within scratch. */
4201 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4202 write_op(tp, (cpu_scratch_base +
4203 (info->text_base & 0xffff) +
4206 info->text_data[i] : 0));
/* Read-only data segment (zeros if rodata_data is NULL). */
4207 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4208 write_op(tp, (cpu_scratch_base +
4209 (info->rodata_base & 0xffff) +
4211 (info->rodata_data ?
4212 info->rodata_data[i] : 0));
/* Initialized-data segment (zeros if data_data is NULL). */
4213 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4214 write_op(tp, (cpu_scratch_base +
4215 (info->data_base & 0xffff) +
4218 info->data_data[i] : 0));
/* Restore the caller's flags (undo the forced PCIX_TARGET_HWBUG). */
4223 tp->tg3_flags = orig_tg3_flags;
4227 /* tp->lock is held. */
/*
 * tg3_load_5701_a0_firmware_fix - install the 5701 A0 workaround firmware.
 * @tp: device instance (caller holds tp->lock)
 *
 * Builds a struct fw_info from the TG3_FW_* constants and the
 * tg3FwText/tg3FwRodata tables (data segment is all zeros, hence NULL),
 * loads the same image into both the RX and TX CPU scratch areas, then
 * releases only the RX CPU: its program counter is pointed at
 * TG3_FW_TEXT_ADDR (with up to 5 retries) and the halt bit is cleared.
 */
4228 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4230 struct fw_info info;
4233 info.text_base = TG3_FW_TEXT_ADDR;
4234 info.text_len = TG3_FW_TEXT_LEN;
4235 info.text_data = &tg3FwText[0];
4236 info.rodata_base = TG3_FW_RODATA_ADDR;
4237 info.rodata_len = TG3_FW_RODATA_LEN;
4238 info.rodata_data = &tg3FwRodata[0];
4239 info.data_base = TG3_FW_DATA_ADDR;
4240 info.data_len = TG3_FW_DATA_LEN;
4241 info.data_data = NULL;
4243 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4244 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4249 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4250 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4255 /* Now startup only the RX cpu. */
4256 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4257 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR)
/* Retry setting the PC a few times; some chips need more than one write. */
4259 for (i = 0; i < 5; i++) {
4260 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4262 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4263 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4264 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* PC never latched: report actual vs. expected and fail. */
4268 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4269 "to set RX CPU PC, is %08x should be %08x\n",
4270 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Clear CPU_MODE (drops the halt bit) to let the RX CPU run. */
4274 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4275 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4280 #if TG3_TSO_SUPPORT != 0
/* Layout constants for the standard TSO firmware image, v1.6.0.  Note the
 * historical "RELASE" spelling in the minor-version macro name -- it is
 * part of the existing identifier, so it is kept as-is. */
4282 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4283 #define TG3_TSO_FW_RELASE_MINOR 0x6
4284 #define TG3_TSO_FW_RELEASE_FIX 0x0
4285 #define TG3_TSO_FW_START_ADDR 0x08000000
4286 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4287 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4288 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4289 #define TG3_TSO_FW_RODATA_LEN 0x60
4290 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4291 #define TG3_TSO_FW_DATA_LEN 0x30
4292 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4293 #define TG3_TSO_FW_SBSS_LEN 0x2c
4294 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4295 #define TG3_TSO_FW_BSS_LEN 0x894
/* tg3TsoFwText: opaque MIPS machine-code words (the .text segment) of the
 * standard TSO firmware image.  Generated data -- never edit by hand.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this chunk (lines dropped during extraction).
 */
4297 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4298 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4299 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4300 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4301 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4302 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4303 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4304 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4305 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4306 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4307 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4308 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4309 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4310 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4311 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4312 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4313 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4314 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4315 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4316 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4317 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4318 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4319 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4320 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4321 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4322 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4323 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4324 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4325 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4326 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4327 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4328 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4329 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4330 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4331 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4332 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4333 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4334 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4335 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4336 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4337 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4338 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4339 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4340 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4341 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4342 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4343 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4344 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4345 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4346 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4347 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4348 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4349 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4350 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4351 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4352 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4353 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4354 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4355 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4356 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4357 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4358 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4359 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4360 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4361 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4362 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4363 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4364 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4365 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4366 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4367 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4368 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4369 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4370 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4371 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4372 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4373 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4374 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4375 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4376 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4377 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4378 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4379 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4380 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4381 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4382 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4383 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4384 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4385 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4386 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4387 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4388 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4389 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4390 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4391 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4392 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4393 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4394 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4395 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4396 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4397 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4398 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4399 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4400 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4401 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4402 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4403 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4404 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4405 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4406 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4407 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4408 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4409 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4410 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4411 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4412 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4413 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4414 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4415 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4416 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4417 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4418 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4419 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4420 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4421 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4422 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4423 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4424 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4425 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4426 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4427 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4428 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4429 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4430 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4431 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4432 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4433 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4434 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4435 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4436 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4437 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4438 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4439 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4440 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4441 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4442 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4443 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4444 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4445 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4446 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4447 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4448 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4449 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4450 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4451 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4452 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4453 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4454 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4455 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4456 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4457 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4458 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4459 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4460 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4461 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4462 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4463 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4464 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4465 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4466 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4467 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4468 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4469 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4470 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4471 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4472 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4473 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4474 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4475 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4476 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4477 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4478 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4479 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4480 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4481 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4482 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4483 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4484 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4485 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4486 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4487 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4488 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4489 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4490 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4491 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4492 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4493 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4494 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4495 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4496 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4497 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4498 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4499 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4500 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4501 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4502 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4503 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4504 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4505 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4506 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4507 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4508 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4509 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4510 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4511 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4512 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4513 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4514 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4515 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4516 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4517 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4518 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4519 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4520 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4521 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4522 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4523 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4524 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4525 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4526 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4527 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4528 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4529 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4530 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4531 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4532 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4533 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4534 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4535 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4536 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4537 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4538 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4539 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4540 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4541 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4542 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4543 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4544 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4545 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4546 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4547 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4548 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4549 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4550 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4551 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4552 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4553 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4554 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4555 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4556 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4557 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4558 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4559 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4560 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4561 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4562 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4563 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4564 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4565 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4566 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4567 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4568 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4569 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4570 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4571 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4572 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4573 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4574 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4575 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4576 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4577 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4578 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4579 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4580 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4581 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4584 static u32 tg3TsoFwRodata[] = {
4585 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4586 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4587 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4588 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4592 static u32 tg3TsoFwData[] = {
4593 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4594 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* 5705 needs a special version of the TSO firmware.  The constants
 * below describe the firmware's version and its section layout in
 * the chip's internal SRAM (text, rodata, data, sbss, bss); they
 * must agree with the tg3Tso5Fw* blobs below.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2	/* NOTE: historical misspelling, kept for compatibility */
#define TG3_TSO5_FW_RELEASE_MINOR	TG3_TSO5_FW_RELASE_MINOR	/* correctly-spelled alias */
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
4614 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4615 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4616 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4617 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4618 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4619 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4620 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4621 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4622 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4623 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4624 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4625 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4626 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4627 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4628 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4629 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4630 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4631 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4632 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4633 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4634 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4635 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4636 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4637 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4638 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4639 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4640 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4641 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4642 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4643 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4644 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4645 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4646 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4647 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4648 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4649 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4650 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4651 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4652 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4653 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4654 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4655 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4656 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4657 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4658 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4659 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4660 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4661 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4662 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4663 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4664 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4665 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4666 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4667 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4668 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4669 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4670 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4671 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4672 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4673 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4674 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4675 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4676 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4677 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4678 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4679 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4680 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4681 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4682 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4683 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4684 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4685 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4686 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4687 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4688 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4689 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4690 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4691 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4692 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4693 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4694 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4695 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4696 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4697 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4698 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4699 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4700 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4701 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4702 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4703 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4704 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4705 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4706 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4707 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4708 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4709 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4710 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4711 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4712 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4713 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4714 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4715 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4716 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4717 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4718 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4719 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4720 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4721 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4722 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4723 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4724 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4725 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4726 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4727 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4728 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4729 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4730 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4731 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4732 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4733 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4734 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4735 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4736 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4737 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4738 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4739 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4740 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4741 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4742 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4743 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4744 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4745 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4746 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4747 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4748 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4749 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4750 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4751 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4752 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4753 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4754 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4755 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4756 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4757 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4758 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4759 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4760 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4761 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4762 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4763 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4764 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4765 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4766 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4767 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4768 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4769 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4770 0x00000000, 0x00000000, 0x00000000,
4773 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4774 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4775 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4776 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4777 0x00000000, 0x00000000, 0x00000000,
4780 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4781 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4782 0x00000000, 0x00000000, 0x00000000,
4785 /* tp->lock is held. */
4786 static int tg3_load_tso_firmware(struct tg3 *tp)
4788 struct fw_info info;
4789 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4792 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4795 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4796 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4797 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4798 info.text_data = &tg3Tso5FwText[0];
4799 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4800 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4801 info.rodata_data = &tg3Tso5FwRodata[0];
4802 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4803 info.data_len = TG3_TSO5_FW_DATA_LEN;
4804 info.data_data = &tg3Tso5FwData[0];
4805 cpu_base = RX_CPU_BASE;
4806 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4807 cpu_scratch_size = (info.text_len +
4810 TG3_TSO5_FW_SBSS_LEN +
4811 TG3_TSO5_FW_BSS_LEN);
4813 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4814 info.text_len = TG3_TSO_FW_TEXT_LEN;
4815 info.text_data = &tg3TsoFwText[0];
4816 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4817 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4818 info.rodata_data = &tg3TsoFwRodata[0];
4819 info.data_base = TG3_TSO_FW_DATA_ADDR;
4820 info.data_len = TG3_TSO_FW_DATA_LEN;
4821 info.data_data = &tg3TsoFwData[0];
4822 cpu_base = TX_CPU_BASE;
4823 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4824 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4827 err = tg3_load_firmware_cpu(tp, cpu_base,
4828 cpu_scratch_base, cpu_scratch_size,
4833 /* Now startup the cpu. */
4834 tw32(cpu_base + CPU_STATE, 0xffffffff);
4835 tw32_f(cpu_base + CPU_PC, info.text_base);
4837 for (i = 0; i < 5; i++) {
4838 if (tr32(cpu_base + CPU_PC) == info.text_base)
4840 tw32(cpu_base + CPU_STATE, 0xffffffff);
4841 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
4842 tw32_f(cpu_base + CPU_PC, info.text_base);
4846 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
4847 "to set CPU PC, is %08x should be %08x\n",
4848 tp->dev->name, tr32(cpu_base + CPU_PC),
4852 tw32(cpu_base + CPU_STATE, 0xffffffff);
4853 tw32_f(cpu_base + CPU_MODE, 0x00000000);
4857 #endif /* TG3_TSO_SUPPORT != 0 */
4859 /* tp->lock is held. */
4860 static void __tg3_set_mac_addr(struct tg3 *tp)
4862 u32 addr_high, addr_low;
4865 addr_high = ((tp->dev->dev_addr[0] << 8) |
4866 tp->dev->dev_addr[1]);
4867 addr_low = ((tp->dev->dev_addr[2] << 24) |
4868 (tp->dev->dev_addr[3] << 16) |
4869 (tp->dev->dev_addr[4] << 8) |
4870 (tp->dev->dev_addr[5] << 0));
4871 for (i = 0; i < 4; i++) {
4872 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4873 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4878 for (i = 0; i < 12; i++) {
4879 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4880 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4884 addr_high = (tp->dev->dev_addr[0] +
4885 tp->dev->dev_addr[1] +
4886 tp->dev->dev_addr[2] +
4887 tp->dev->dev_addr[3] +
4888 tp->dev->dev_addr[4] +
4889 tp->dev->dev_addr[5]) &
4890 TX_BACKOFF_SEED_MASK;
4891 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4894 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4896 struct tg3 *tp = netdev_priv(dev);
4897 struct sockaddr *addr = p;
4899 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4901 spin_lock_irq(&tp->lock);
4902 __tg3_set_mac_addr(tp);
4903 spin_unlock_irq(&tp->lock);
4908 /* tp->lock is held. */
4909 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4910 dma_addr_t mapping, u32 maxlen_flags,
4914 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4915 ((u64) mapping >> 32));
4917 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4918 ((u64) mapping & 0xffffffff));
4920 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4923 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
4924 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750))
4926 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4930 static void __tg3_set_rx_mode(struct net_device *);
4932 /* tp->lock is held. */
4933 static int tg3_reset_hw(struct tg3 *tp)
4935 u32 val, rdmac_mode;
4938 tg3_disable_ints(tp);
4942 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4944 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4945 err = tg3_abort_hw(tp);
4950 err = tg3_chip_reset(tp);
4954 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4956 /* This works around an issue with Athlon chipsets on
4957 * B3 tigon3 silicon. This bit has no effect on any
4958 * other revision. But do not set this on PCI Express
4961 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4962 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4963 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4965 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4966 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4967 val = tr32(TG3PCI_PCISTATE);
4968 val |= PCISTATE_RETRY_SAME_DMA;
4969 tw32(TG3PCI_PCISTATE, val);
4972 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4973 /* Enable some hw fixes. */
4974 val = tr32(TG3PCI_MSI_DATA);
4975 val |= (1 << 26) | (1 << 28) | (1 << 29);
4976 tw32(TG3PCI_MSI_DATA, val);
4979 /* Descriptor ring init may make accesses to the
4980 * NIC SRAM area to setup the TX descriptors, so we
4981 * can only do this after the hardware has been
4982 * successfully reset.
4986 /* This value is determined during the probe time DMA
4987 * engine test, tg3_test_dma.
4989 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4991 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4992 GRC_MODE_4X_NIC_SEND_RINGS |
4993 GRC_MODE_NO_TX_PHDR_CSUM |
4994 GRC_MODE_NO_RX_PHDR_CSUM);
4995 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4996 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4997 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4998 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4999 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5003 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5005 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5006 val = tr32(GRC_MISC_CFG);
5008 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5009 tw32(GRC_MISC_CFG, val);
5011 /* Initialize MBUF/DESC pool. */
5012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5014 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5015 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5017 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5019 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5020 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5021 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5023 #if TG3_TSO_SUPPORT != 0
5024 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5027 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5028 TG3_TSO5_FW_RODATA_LEN +
5029 TG3_TSO5_FW_DATA_LEN +
5030 TG3_TSO5_FW_SBSS_LEN +
5031 TG3_TSO5_FW_BSS_LEN);
5032 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5033 tw32(BUFMGR_MB_POOL_ADDR,
5034 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5035 tw32(BUFMGR_MB_POOL_SIZE,
5036 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5040 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5041 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5042 tp->bufmgr_config.mbuf_read_dma_low_water);
5043 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5044 tp->bufmgr_config.mbuf_mac_rx_low_water);
5045 tw32(BUFMGR_MB_HIGH_WATER,
5046 tp->bufmgr_config.mbuf_high_water);
5048 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5049 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5050 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5051 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5052 tw32(BUFMGR_MB_HIGH_WATER,
5053 tp->bufmgr_config.mbuf_high_water_jumbo);
5055 tw32(BUFMGR_DMA_LOW_WATER,
5056 tp->bufmgr_config.dma_low_water);
5057 tw32(BUFMGR_DMA_HIGH_WATER,
5058 tp->bufmgr_config.dma_high_water);
5060 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5061 for (i = 0; i < 2000; i++) {
5062 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5067 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5072 /* Setup replenish threshold. */
5073 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5075 /* Initialize TG3_BDINFO's at:
5076 * RCVDBDI_STD_BD: standard eth size rx ring
5077 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5078 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5081 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5082 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5083 * ring attribute flags
5084 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5086 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5087 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5089 * The size of each ring is fixed in the firmware, but the location is
5092 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5093 ((u64) tp->rx_std_mapping >> 32));
5094 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5095 ((u64) tp->rx_std_mapping & 0xffffffff));
5096 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5097 NIC_SRAM_RX_BUFFER_DESC);
5099 /* Don't even try to program the JUMBO/MINI buffer descriptor
5102 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5103 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5104 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5105 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5107 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5108 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5110 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5111 BDINFO_FLAGS_DISABLED);
5113 /* Setup replenish threshold. */
5114 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5116 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5117 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5118 ((u64) tp->rx_jumbo_mapping >> 32));
5119 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5120 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5121 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5122 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5123 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5124 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5126 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5127 BDINFO_FLAGS_DISABLED);
5132 /* There is only one send ring on 5705/5750, no need to explicitly
5133 * disable the others.
5135 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5136 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5137 /* Clear out send RCB ring in SRAM. */
5138 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5139 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5140 BDINFO_FLAGS_DISABLED);
5145 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5146 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5148 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5149 tp->tx_desc_mapping,
5150 (TG3_TX_RING_SIZE <<
5151 BDINFO_FLAGS_MAXLEN_SHIFT),
5152 NIC_SRAM_TX_BUFFER_DESC);
5154 /* There is only one receive return ring on 5705/5750, no need
5155 * to explicitly disable the others.
5157 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5158 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5159 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5160 i += TG3_BDINFO_SIZE) {
5161 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5162 BDINFO_FLAGS_DISABLED);
5167 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5169 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5171 (TG3_RX_RCB_RING_SIZE(tp) <<
5172 BDINFO_FLAGS_MAXLEN_SHIFT),
5175 tp->rx_std_ptr = tp->rx_pending;
5176 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5179 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5180 tp->rx_jumbo_pending : 0;
5181 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5184 /* Initialize MAC address and backoff seed. */
5185 __tg3_set_mac_addr(tp);
5187 /* MTU + ethernet header + FCS + optional VLAN tag */
5188 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5190 /* The slot time is changed by tg3_setup_phy if we
5191 * run at gigabit with half duplex.
5193 tw32(MAC_TX_LENGTHS,
5194 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5195 (6 << TX_LENGTHS_IPG_SHIFT) |
5196 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5198 /* Receive rules. */
5199 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5200 tw32(RCVLPC_CONFIG, 0x0181);
5202 /* Calculate RDMAC_MODE setting early, we need it to determine
5203 * the RCVLPC_STATE_ENABLE mask.
5205 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5206 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5207 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5208 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5209 RDMAC_MODE_LNGREAD_ENAB);
5210 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5211 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5212 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5213 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5214 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5215 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5216 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5217 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5218 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5219 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5220 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5221 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5225 #if TG3_TSO_SUPPORT != 0
5226 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5227 rdmac_mode |= (1 << 27);
5230 /* Receive/send statistics. */
5231 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5232 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5233 val = tr32(RCVLPC_STATS_ENABLE);
5234 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5235 tw32(RCVLPC_STATS_ENABLE, val);
5237 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5239 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5240 tw32(SNDDATAI_STATSENAB, 0xffffff);
5241 tw32(SNDDATAI_STATSCTRL,
5242 (SNDDATAI_SCTRL_ENABLE |
5243 SNDDATAI_SCTRL_FASTUPD));
5245 /* Setup host coalescing engine. */
5246 tw32(HOSTCC_MODE, 0);
5247 for (i = 0; i < 2000; i++) {
5248 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5253 tw32(HOSTCC_RXCOL_TICKS, 0);
5254 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5255 tw32(HOSTCC_RXMAX_FRAMES, 1);
5256 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5257 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5258 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5259 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5260 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5262 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5263 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5265 /* set status block DMA address */
5266 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5267 ((u64) tp->status_mapping >> 32));
5268 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5269 ((u64) tp->status_mapping & 0xffffffff));
5271 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5272 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5273 /* Status/statistics block address. See tg3_timer,
5274 * the tg3_periodic_fetch_stats call there, and
5275 * tg3_get_stats to see how this works for 5705/5750 chips.
5277 tw32(HOSTCC_STAT_COAL_TICKS,
5278 DEFAULT_STAT_COAL_TICKS);
5279 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5280 ((u64) tp->stats_mapping >> 32));
5281 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5282 ((u64) tp->stats_mapping & 0xffffffff));
5283 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5284 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5287 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5289 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5290 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5291 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5292 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5293 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5295 /* Clear statistics/status block in chip, and status block in ram. */
5296 for (i = NIC_SRAM_STATS_BLK;
5297 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5299 tg3_write_mem(tp, i, 0);
5302 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5304 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5305 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5306 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5309 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5311 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5312 GRC_LCLCTRL_GPIO_OUTPUT1);
5313 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5316 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5317 tr32(MAILBOX_INTERRUPT_0);
5319 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5320 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5321 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5325 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5326 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5327 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5328 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5329 WDMAC_MODE_LNGREAD_ENAB);
5331 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5332 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5334 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5335 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5336 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5338 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5339 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5340 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5341 val |= WDMAC_MODE_RX_ACCEL;
5345 tw32_f(WDMAC_MODE, val);
5348 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5349 val = tr32(TG3PCI_X_CAPS);
5350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5351 val &= ~PCIX_CAPS_BURST_MASK;
5352 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5353 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5354 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5355 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5356 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5357 val |= (tp->split_mode_max_reqs <<
5358 PCIX_CAPS_SPLIT_SHIFT);
5360 tw32(TG3PCI_X_CAPS, val);
5363 tw32_f(RDMAC_MODE, rdmac_mode);
5366 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5367 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5368 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5369 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5370 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5371 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5372 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5373 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5374 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5375 #if TG3_TSO_SUPPORT != 0
5376 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5377 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5379 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5380 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5382 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5383 err = tg3_load_5701_a0_firmware_fix(tp);
5388 #if TG3_TSO_SUPPORT != 0
5389 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5390 err = tg3_load_tso_firmware(tp);
5396 tp->tx_mode = TX_MODE_ENABLE;
5397 tw32_f(MAC_TX_MODE, tp->tx_mode);
5400 tp->rx_mode = RX_MODE_ENABLE;
5401 tw32_f(MAC_RX_MODE, tp->rx_mode);
5404 if (tp->link_config.phy_is_low_power) {
5405 tp->link_config.phy_is_low_power = 0;
5406 tp->link_config.speed = tp->link_config.orig_speed;
5407 tp->link_config.duplex = tp->link_config.orig_duplex;
5408 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5411 tp->mi_mode = MAC_MI_MODE_BASE;
5412 tw32_f(MAC_MI_MODE, tp->mi_mode);
5415 tw32(MAC_LED_CTRL, tp->led_ctrl);
5417 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5418 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5419 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5422 tw32_f(MAC_RX_MODE, tp->rx_mode);
5425 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5426 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5427 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5428 /* Set drive transmission level to 1.2V */
5429 /* only if the signal pre-emphasis bit is not set */
5430 val = tr32(MAC_SERDES_CFG);
5433 tw32(MAC_SERDES_CFG, val);
5435 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5436 tw32(MAC_SERDES_CFG, 0x616000);
5439 /* Prevent chip from dropping frames when flow control
5442 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5445 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5446 /* Use hardware link auto-negotiation */
5447 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5450 err = tg3_setup_phy(tp, 1);
5454 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5457 /* Clear CRC stats. */
5458 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5459 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5460 tg3_readphy(tp, 0x14, &tmp);
5464 __tg3_set_rx_mode(tp->dev);
5466 /* Initialize receive rules. */
5467 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5468 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5469 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5470 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5477 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5481 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5483 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5485 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5487 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5489 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5491 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5493 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5495 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5497 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5499 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5501 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5503 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5505 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5507 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5515 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5517 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5518 tg3_enable_ints(tp);
5523 /* Called at device open time to get the chip ready for
5524 * packet processing. Invoked with tp->lock held.
5526 static int tg3_init_hw(struct tg3 *tp)
5530 /* Force the chip into D0. */
5531 err = tg3_set_power_state(tp, 0);
5535 tg3_switch_clocks(tp);
5537 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5539 err = tg3_reset_hw(tp);
/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * {high,low} software counter.  When the 32-bit addition to .low
 * wraps (result smaller than the addend), carry into .high.
 * Wrapped in do { } while (0) so it behaves as a single statement.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
5552 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5554 struct tg3_hw_stats *sp = tp->hw_stats;
5556 if (!netif_carrier_ok(tp->dev))
5559 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5560 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5561 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5562 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5563 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5564 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5565 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5566 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5567 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5568 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5569 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5570 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5571 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5573 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5574 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5575 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5576 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5577 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5578 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5579 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5580 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5581 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5582 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5583 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5584 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5585 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5586 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5589 static void tg3_timer(unsigned long __opaque)
5591 struct tg3 *tp = (struct tg3 *) __opaque;
5592 unsigned long flags;
5594 spin_lock_irqsave(&tp->lock, flags);
5595 spin_lock(&tp->tx_lock);
5597 /* All of this garbage is because when using non-tagged
5598 * IRQ status the mailbox/status_block protocol the chip
5599 * uses with the cpu is race prone.
5601 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5602 tw32(GRC_LOCAL_CTRL,
5603 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5605 tw32(HOSTCC_MODE, tp->coalesce_mode |
5606 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5609 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5610 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5611 spin_unlock(&tp->tx_lock);
5612 spin_unlock_irqrestore(&tp->lock, flags);
5613 schedule_work(&tp->reset_task);
5617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5619 tg3_periodic_fetch_stats(tp);
5621 /* This part only runs once per second. */
5622 if (!--tp->timer_counter) {
5623 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5627 mac_stat = tr32(MAC_STATUS);
5630 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5631 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5633 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5637 tg3_setup_phy(tp, 0);
5638 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5639 u32 mac_stat = tr32(MAC_STATUS);
5642 if (netif_carrier_ok(tp->dev) &&
5643 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5646 if (! netif_carrier_ok(tp->dev) &&
5647 (mac_stat & (MAC_STATUS_PCS_SYNCED |
5648 MAC_STATUS_SIGNAL_DET))) {
5654 ~MAC_MODE_PORT_MODE_MASK));
5656 tw32_f(MAC_MODE, tp->mac_mode);
5658 tg3_setup_phy(tp, 0);
5662 tp->timer_counter = tp->timer_multiplier;
5665 /* Heartbeat is only sent once every 120 seconds. */
5666 if (!--tp->asf_counter) {
5667 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5670 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5671 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5672 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5673 val = tr32(GRC_RX_CPU_EVENT);
5675 tw32(GRC_RX_CPU_EVENT, val);
5677 tp->asf_counter = tp->asf_multiplier;
5680 spin_unlock(&tp->tx_lock);
5681 spin_unlock_irqrestore(&tp->lock, flags);
5683 tp->timer.expires = jiffies + tp->timer_offset;
5684 add_timer(&tp->timer);
5687 static int tg3_open(struct net_device *dev)
5689 struct tg3 *tp = netdev_priv(dev);
5692 spin_lock_irq(&tp->lock);
5693 spin_lock(&tp->tx_lock);
5695 tg3_disable_ints(tp);
5696 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5698 spin_unlock(&tp->tx_lock);
5699 spin_unlock_irq(&tp->lock);
5701 /* The placement of this call is tied
5702 * to the setup and use of Host TX descriptors.
5704 err = tg3_alloc_consistent(tp);
5708 err = request_irq(dev->irq, tg3_interrupt,
5709 SA_SHIRQ, dev->name, dev);
5712 tg3_free_consistent(tp);
5716 spin_lock_irq(&tp->lock);
5717 spin_lock(&tp->tx_lock);
5719 err = tg3_init_hw(tp);
5724 tp->timer_offset = HZ / 10;
5725 tp->timer_counter = tp->timer_multiplier = 10;
5726 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5728 init_timer(&tp->timer);
5729 tp->timer.expires = jiffies + tp->timer_offset;
5730 tp->timer.data = (unsigned long) tp;
5731 tp->timer.function = tg3_timer;
5732 add_timer(&tp->timer);
5734 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5737 spin_unlock(&tp->tx_lock);
5738 spin_unlock_irq(&tp->lock);
5741 free_irq(dev->irq, dev);
5742 tg3_free_consistent(tp);
5746 spin_lock_irq(&tp->lock);
5747 spin_lock(&tp->tx_lock);
5749 tg3_enable_ints(tp);
5751 spin_unlock(&tp->tx_lock);
5752 spin_unlock_irq(&tp->lock);
5754 netif_start_queue(dev);
5760 /*static*/ void tg3_dump_state(struct tg3 *tp)
5762 u32 val32, val32_2, val32_3, val32_4, val32_5;
5766 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5767 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5768 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5772 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5773 tr32(MAC_MODE), tr32(MAC_STATUS));
5774 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5775 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5776 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5777 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5778 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5779 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5781 /* Send data initiator control block */
5782 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5783 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5784 printk(" SNDDATAI_STATSCTRL[%08x]\n",
5785 tr32(SNDDATAI_STATSCTRL));
5787 /* Send data completion control block */
5788 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5790 /* Send BD ring selector block */
5791 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5792 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5794 /* Send BD initiator control block */
5795 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5796 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5798 /* Send BD completion control block */
5799 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5801 /* Receive list placement control block */
5802 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5803 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5804 printk(" RCVLPC_STATSCTRL[%08x]\n",
5805 tr32(RCVLPC_STATSCTRL));
5807 /* Receive data and receive BD initiator control block */
5808 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5809 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5811 /* Receive data completion control block */
5812 printk("DEBUG: RCVDCC_MODE[%08x]\n",
5815 /* Receive BD initiator control block */
5816 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5817 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5819 /* Receive BD completion control block */
5820 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5821 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5823 /* Receive list selector control block */
5824 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5825 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5827 /* Mbuf cluster free block */
5828 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5829 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5831 /* Host coalescing control block */
5832 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5833 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5834 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5835 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5836 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5837 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5838 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5839 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5840 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5841 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5842 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5843 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5845 /* Memory arbiter control block */
5846 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5847 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5849 /* Buffer manager control block */
5850 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5851 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5852 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5853 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5854 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5855 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5856 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5857 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5859 /* Read DMA control block */
5860 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5861 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5863 /* Write DMA control block */
5864 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5865 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5867 /* DMA completion block */
5868 printk("DEBUG: DMAC_MODE[%08x]\n",
5872 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5873 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5874 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5875 tr32(GRC_LOCAL_CTRL));
5878 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5879 tr32(RCVDBDI_JUMBO_BD + 0x0),
5880 tr32(RCVDBDI_JUMBO_BD + 0x4),
5881 tr32(RCVDBDI_JUMBO_BD + 0x8),
5882 tr32(RCVDBDI_JUMBO_BD + 0xc));
5883 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5884 tr32(RCVDBDI_STD_BD + 0x0),
5885 tr32(RCVDBDI_STD_BD + 0x4),
5886 tr32(RCVDBDI_STD_BD + 0x8),
5887 tr32(RCVDBDI_STD_BD + 0xc));
5888 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5889 tr32(RCVDBDI_MINI_BD + 0x0),
5890 tr32(RCVDBDI_MINI_BD + 0x4),
5891 tr32(RCVDBDI_MINI_BD + 0x8),
5892 tr32(RCVDBDI_MINI_BD + 0xc));
5894 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5895 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5896 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5897 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5898 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5899 val32, val32_2, val32_3, val32_4);
5901 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5902 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5903 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5904 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5905 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5906 val32, val32_2, val32_3, val32_4);
5908 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5909 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5910 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5911 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5912 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5913 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5914 val32, val32_2, val32_3, val32_4, val32_5);
5916 /* SW status block */
5917 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5918 tp->hw_status->status,
5919 tp->hw_status->status_tag,
5920 tp->hw_status->rx_jumbo_consumer,
5921 tp->hw_status->rx_consumer,
5922 tp->hw_status->rx_mini_consumer,
5923 tp->hw_status->idx[0].rx_producer,
5924 tp->hw_status->idx[0].tx_consumer);
5926 /* SW statistics block */
5927 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5928 ((u32 *)tp->hw_stats)[0],
5929 ((u32 *)tp->hw_stats)[1],
5930 ((u32 *)tp->hw_stats)[2],
5931 ((u32 *)tp->hw_stats)[3]);
5934 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5935 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5936 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5937 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5938 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5940 /* NIC side send descriptors. */
5941 for (i = 0; i < 6; i++) {
5944 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5945 + (i * sizeof(struct tg3_tx_buffer_desc));
5946 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5948 readl(txd + 0x0), readl(txd + 0x4),
5949 readl(txd + 0x8), readl(txd + 0xc));
5952 /* NIC side RX descriptors. */
5953 for (i = 0; i < 6; i++) {
5956 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5957 + (i * sizeof(struct tg3_rx_buffer_desc));
5958 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5960 readl(rxd + 0x0), readl(rxd + 0x4),
5961 readl(rxd + 0x8), readl(rxd + 0xc));
5962 rxd += (4 * sizeof(u32));
5963 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5965 readl(rxd + 0x0), readl(rxd + 0x4),
5966 readl(rxd + 0x8), readl(rxd + 0xc));
5969 for (i = 0; i < 6; i++) {
5972 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5973 + (i * sizeof(struct tg3_rx_buffer_desc));
5974 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5976 readl(rxd + 0x0), readl(rxd + 0x4),
5977 readl(rxd + 0x8), readl(rxd + 0xc));
5978 rxd += (4 * sizeof(u32));
5979 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5981 readl(rxd + 0x0), readl(rxd + 0x4),
5982 readl(rxd + 0x8), readl(rxd + 0xc));
5987 static struct net_device_stats *tg3_get_stats(struct net_device *);
5988 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5990 static int tg3_close(struct net_device *dev)
5992 struct tg3 *tp = netdev_priv(dev);
5994 netif_stop_queue(dev);
5996 del_timer_sync(&tp->timer);
5998 spin_lock_irq(&tp->lock);
5999 spin_lock(&tp->tx_lock);
6004 tg3_disable_ints(tp);
6009 ~(TG3_FLAG_INIT_COMPLETE |
6010 TG3_FLAG_GOT_SERDES_FLOWCTL);
6011 netif_carrier_off(tp->dev);
6013 spin_unlock(&tp->tx_lock);
6014 spin_unlock_irq(&tp->lock);
6016 free_irq(dev->irq, dev);
6018 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6019 sizeof(tp->net_stats_prev));
6020 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6021 sizeof(tp->estats_prev));
6023 tg3_free_consistent(tp);
6028 static inline unsigned long get_stat64(tg3_stat64_t *val)
6032 #if (BITS_PER_LONG == 32)
6035 ret = ((u64)val->high << 32) | ((u64)val->low);
6040 static unsigned long calc_crc_errors(struct tg3 *tp)
6042 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6044 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6045 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6047 unsigned long flags;
6050 spin_lock_irqsave(&tp->lock, flags);
6051 if (!tg3_readphy(tp, 0x1e, &val)) {
6052 tg3_writephy(tp, 0x1e, val | 0x8000);
6053 tg3_readphy(tp, 0x14, &val);
6056 spin_unlock_irqrestore(&tp->lock, flags);
6058 tp->phy_crc_errors += val;
6060 return tp->phy_crc_errors;
6063 return get_stat64(&hw_stats->rx_fcs_errors);
/* An ethtool statistic is the snapshot taken at the last close
 * (old_estats) plus the live hardware counter for this session.
 * Expects estats, old_estats and hw_stats in the caller's scope.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
		get_stat64(&hw_stats->member)
6070 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6072 struct tg3_ethtool_stats *estats = &tp->estats;
6073 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6074 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6079 ESTAT_ADD(rx_octets);
6080 ESTAT_ADD(rx_fragments);
6081 ESTAT_ADD(rx_ucast_packets);
6082 ESTAT_ADD(rx_mcast_packets);
6083 ESTAT_ADD(rx_bcast_packets);
6084 ESTAT_ADD(rx_fcs_errors);
6085 ESTAT_ADD(rx_align_errors);
6086 ESTAT_ADD(rx_xon_pause_rcvd);
6087 ESTAT_ADD(rx_xoff_pause_rcvd);
6088 ESTAT_ADD(rx_mac_ctrl_rcvd);
6089 ESTAT_ADD(rx_xoff_entered);
6090 ESTAT_ADD(rx_frame_too_long_errors);
6091 ESTAT_ADD(rx_jabbers);
6092 ESTAT_ADD(rx_undersize_packets);
6093 ESTAT_ADD(rx_in_length_errors);
6094 ESTAT_ADD(rx_out_length_errors);
6095 ESTAT_ADD(rx_64_or_less_octet_packets);
6096 ESTAT_ADD(rx_65_to_127_octet_packets);
6097 ESTAT_ADD(rx_128_to_255_octet_packets);
6098 ESTAT_ADD(rx_256_to_511_octet_packets);
6099 ESTAT_ADD(rx_512_to_1023_octet_packets);
6100 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6101 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6102 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6103 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6104 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6106 ESTAT_ADD(tx_octets);
6107 ESTAT_ADD(tx_collisions);
6108 ESTAT_ADD(tx_xon_sent);
6109 ESTAT_ADD(tx_xoff_sent);
6110 ESTAT_ADD(tx_flow_control);
6111 ESTAT_ADD(tx_mac_errors);
6112 ESTAT_ADD(tx_single_collisions);
6113 ESTAT_ADD(tx_mult_collisions);
6114 ESTAT_ADD(tx_deferred);
6115 ESTAT_ADD(tx_excessive_collisions);
6116 ESTAT_ADD(tx_late_collisions);
6117 ESTAT_ADD(tx_collide_2times);
6118 ESTAT_ADD(tx_collide_3times);
6119 ESTAT_ADD(tx_collide_4times);
6120 ESTAT_ADD(tx_collide_5times);
6121 ESTAT_ADD(tx_collide_6times);
6122 ESTAT_ADD(tx_collide_7times);
6123 ESTAT_ADD(tx_collide_8times);
6124 ESTAT_ADD(tx_collide_9times);
6125 ESTAT_ADD(tx_collide_10times);
6126 ESTAT_ADD(tx_collide_11times);
6127 ESTAT_ADD(tx_collide_12times);
6128 ESTAT_ADD(tx_collide_13times);
6129 ESTAT_ADD(tx_collide_14times);
6130 ESTAT_ADD(tx_collide_15times);
6131 ESTAT_ADD(tx_ucast_packets);
6132 ESTAT_ADD(tx_mcast_packets);
6133 ESTAT_ADD(tx_bcast_packets);
6134 ESTAT_ADD(tx_carrier_sense_errors);
6135 ESTAT_ADD(tx_discards);
6136 ESTAT_ADD(tx_errors);
6138 ESTAT_ADD(dma_writeq_full);
6139 ESTAT_ADD(dma_write_prioq_full);
6140 ESTAT_ADD(rxbds_empty);
6141 ESTAT_ADD(rx_discards);
6142 ESTAT_ADD(rx_errors);
6143 ESTAT_ADD(rx_threshold_hit);
6145 ESTAT_ADD(dma_readq_full);
6146 ESTAT_ADD(dma_read_prioq_full);
6147 ESTAT_ADD(tx_comp_queue_full);
6149 ESTAT_ADD(ring_set_send_prod_index);
6150 ESTAT_ADD(ring_status_update);
6151 ESTAT_ADD(nic_irqs);
6152 ESTAT_ADD(nic_avoided_irqs);
6153 ESTAT_ADD(nic_tx_threshold_hit);
6158 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6160 struct tg3 *tp = netdev_priv(dev);
6161 struct net_device_stats *stats = &tp->net_stats;
6162 struct net_device_stats *old_stats = &tp->net_stats_prev;
6163 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6168 stats->rx_packets = old_stats->rx_packets +
6169 get_stat64(&hw_stats->rx_ucast_packets) +
6170 get_stat64(&hw_stats->rx_mcast_packets) +
6171 get_stat64(&hw_stats->rx_bcast_packets);
6173 stats->tx_packets = old_stats->tx_packets +
6174 get_stat64(&hw_stats->tx_ucast_packets) +
6175 get_stat64(&hw_stats->tx_mcast_packets) +
6176 get_stat64(&hw_stats->tx_bcast_packets);
6178 stats->rx_bytes = old_stats->rx_bytes +
6179 get_stat64(&hw_stats->rx_octets);
6180 stats->tx_bytes = old_stats->tx_bytes +
6181 get_stat64(&hw_stats->tx_octets);
6183 stats->rx_errors = old_stats->rx_errors +
6184 get_stat64(&hw_stats->rx_errors) +
6185 get_stat64(&hw_stats->rx_discards);
6186 stats->tx_errors = old_stats->tx_errors +
6187 get_stat64(&hw_stats->tx_errors) +
6188 get_stat64(&hw_stats->tx_mac_errors) +
6189 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6190 get_stat64(&hw_stats->tx_discards);
6192 stats->multicast = old_stats->multicast +
6193 get_stat64(&hw_stats->rx_mcast_packets);
6194 stats->collisions = old_stats->collisions +
6195 get_stat64(&hw_stats->tx_collisions);
6197 stats->rx_length_errors = old_stats->rx_length_errors +
6198 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6199 get_stat64(&hw_stats->rx_undersize_packets);
6201 stats->rx_over_errors = old_stats->rx_over_errors +
6202 get_stat64(&hw_stats->rxbds_empty);
6203 stats->rx_frame_errors = old_stats->rx_frame_errors +
6204 get_stat64(&hw_stats->rx_align_errors);
6205 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6206 get_stat64(&hw_stats->tx_discards);
6207 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6208 get_stat64(&hw_stats->tx_carrier_sense_errors);
6210 stats->rx_crc_errors = old_stats->rx_crc_errors +
6211 calc_crc_errors(tp);
6216 static inline u32 calc_crc(unsigned char *buf, int len)
6224 for (j = 0; j < len; j++) {
6227 for (k = 0; k < 8; k++) {
6241 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6243 /* accept or reject all multicast frames */
6244 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6245 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6246 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6247 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6250 static void __tg3_set_rx_mode(struct net_device *dev)
6252 struct tg3 *tp = netdev_priv(dev);
6255 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6256 RX_MODE_KEEP_VLAN_TAG);
6258 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6261 #if TG3_VLAN_TAG_USED
6263 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6264 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6266 /* By definition, VLAN is disabled always in this
6269 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6270 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6273 if (dev->flags & IFF_PROMISC) {
6274 /* Promiscuous mode. */
6275 rx_mode |= RX_MODE_PROMISC;
6276 } else if (dev->flags & IFF_ALLMULTI) {
6277 /* Accept all multicast. */
6278 tg3_set_multi (tp, 1);
6279 } else if (dev->mc_count < 1) {
6280 /* Reject all multicast. */
6281 tg3_set_multi (tp, 0);
6283 /* Accept one or more multicast(s). */
6284 struct dev_mc_list *mclist;
6286 u32 mc_filter[4] = { 0, };
6291 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6292 i++, mclist = mclist->next) {
6294 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6296 regidx = (bit & 0x60) >> 5;
6298 mc_filter[regidx] |= (1 << bit);
6301 tw32(MAC_HASH_REG_0, mc_filter[0]);
6302 tw32(MAC_HASH_REG_1, mc_filter[1]);
6303 tw32(MAC_HASH_REG_2, mc_filter[2]);
6304 tw32(MAC_HASH_REG_3, mc_filter[3]);
6307 if (rx_mode != tp->rx_mode) {
6308 tp->rx_mode = rx_mode;
6309 tw32_f(MAC_RX_MODE, rx_mode);
6314 static void tg3_set_rx_mode(struct net_device *dev)
6316 struct tg3 *tp = netdev_priv(dev);
6318 spin_lock_irq(&tp->lock);
6319 spin_lock(&tp->tx_lock);
6320 __tg3_set_rx_mode(dev);
6321 spin_unlock(&tp->tx_lock);
6322 spin_unlock_irq(&tp->lock);
6325 #define TG3_REGDUMP_LEN (32 * 1024)
6327 static int tg3_get_regs_len(struct net_device *dev)
6329 return TG3_REGDUMP_LEN;
6332 static void tg3_get_regs(struct net_device *dev,
6333 struct ethtool_regs *regs, void *_p)
6336 struct tg3 *tp = netdev_priv(dev);
6342 memset(p, 0, TG3_REGDUMP_LEN);
6344 spin_lock_irq(&tp->lock);
6345 spin_lock(&tp->tx_lock);
6347 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
6348 #define GET_REG32_LOOP(base,len) \
6349 do { p = (u32 *)(orig_p + (base)); \
6350 for (i = 0; i < len; i += 4) \
6351 __GET_REG32((base) + i); \
6353 #define GET_REG32_1(reg) \
6354 do { p = (u32 *)(orig_p + (reg)); \
6355 __GET_REG32((reg)); \
6358 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6359 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6360 GET_REG32_LOOP(MAC_MODE, 0x4f0);
6361 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6362 GET_REG32_1(SNDDATAC_MODE);
6363 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6364 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6365 GET_REG32_1(SNDBDC_MODE);
6366 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6367 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6368 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6369 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6370 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6371 GET_REG32_1(RCVDCC_MODE);
6372 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6373 GET_REG32_LOOP(RCVCC_MODE, 0x14);
6374 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6375 GET_REG32_1(MBFREE_MODE);
6376 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6377 GET_REG32_LOOP(MEMARB_MODE, 0x10);
6378 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6379 GET_REG32_LOOP(RDMAC_MODE, 0x08);
6380 GET_REG32_LOOP(WDMAC_MODE, 0x08);
6381 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6382 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6383 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6384 GET_REG32_LOOP(FTQ_RESET, 0x120);
6385 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6386 GET_REG32_1(DMAC_MODE);
6387 GET_REG32_LOOP(GRC_MODE, 0x4c);
6388 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6389 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6392 #undef GET_REG32_LOOP
6395 spin_unlock(&tp->tx_lock);
6396 spin_unlock_irq(&tp->lock);
6399 static int tg3_get_eeprom_len(struct net_device *dev)
6401 struct tg3 *tp = netdev_priv(dev);
6403 return tp->nvram_size;
6406 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6408 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6410 struct tg3 *tp = netdev_priv(dev);
6413 u32 i, offset, len, val, b_offset, b_count;
6415 offset = eeprom->offset;
6419 eeprom->magic = TG3_EEPROM_MAGIC;
6422 /* adjustments to start on required 4 byte boundary */
6423 b_offset = offset & 3;
6424 b_count = 4 - b_offset;
6425 if (b_count > len) {
6426 /* i.e. offset=1 len=2 */
6429 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6432 val = cpu_to_le32(val);
6433 memcpy(data, ((char*)&val) + b_offset, b_count);
6436 eeprom->len += b_count;
6439 /* read bytes upto the last 4 byte boundary */
6440 pd = &data[eeprom->len];
6441 for (i = 0; i < (len - (len & 3)); i += 4) {
6442 ret = tg3_nvram_read(tp, offset + i, &val);
6447 val = cpu_to_le32(val);
6448 memcpy(pd + i, &val, 4);
6453 /* read last bytes not ending on 4 byte boundary */
6454 pd = &data[eeprom->len];
6456 b_offset = offset + len - b_count;
6457 ret = tg3_nvram_read(tp, b_offset, &val);
6460 val = cpu_to_le32(val);
6461 memcpy(pd, ((char*)&val), b_count);
6462 eeprom->len += b_count;
6467 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
6469 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6471 struct tg3 *tp = netdev_priv(dev);
6473 u32 offset, len, b_offset, odd_len, start, end;
6476 if (eeprom->magic != TG3_EEPROM_MAGIC)
6479 offset = eeprom->offset;
6482 if ((b_offset = (offset & 3))) {
6483 /* adjustments to start on required 4 byte boundary */
6484 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6487 start = cpu_to_le32(start);
6493 if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6494 /* adjustments to end on required 4 byte boundary */
6496 len = (len + 3) & ~3;
6497 ret = tg3_nvram_read(tp, offset+len-4, &end);
6500 end = cpu_to_le32(end);
6504 if (b_offset || odd_len) {
6505 buf = kmalloc(len, GFP_KERNEL);
6509 memcpy(buf, &start, 4);
6511 memcpy(buf+len-4, &end, 4);
6512 memcpy(buf + b_offset, data, eeprom->len);
6515 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings handler: report supported/advertised link modes,
 * current speed/duplex and autoneg state from tp->link_config.
 * Fails while the device is uninitialized or the PHY is in low-power mode.
 */
6523 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6525 struct tg3 *tp = netdev_priv(dev);
6527 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6528 tp->link_config.phy_is_low_power)
6531 cmd->supported = (SUPPORTED_Autoneg);
/* Gigabit modes only when the chip is not a 10/100-only part. */
6533 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6534 cmd->supported |= (SUPPORTED_1000baseT_Half |
6535 SUPPORTED_1000baseT_Full);
/* Copper PHY: add the 10/100 twisted-pair modes; SERDES is fibre. */
6537 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6538 cmd->supported |= (SUPPORTED_100baseT_Half |
6539 SUPPORTED_100baseT_Full |
6540 SUPPORTED_10baseT_Half |
6541 SUPPORTED_10baseT_Full |
6544 cmd->supported |= SUPPORTED_FIBRE;
6546 cmd->advertising = tp->link_config.advertising;
6547 cmd->speed = tp->link_config.active_speed;
6548 cmd->duplex = tp->link_config.active_duplex;
6550 cmd->phy_address = PHY_ADDR;
6551 cmd->transceiver = 0;
6552 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings handler: validate the request, store the new
 * autoneg/speed/duplex configuration under tp->lock + tp->tx_lock, and
 * kick the PHY state machine via tg3_setup_phy().
 */
6558 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6560 struct tg3 *tp = netdev_priv(dev);
6562 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6563 tp->link_config.phy_is_low_power)
6566 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6567 /* These are the only valid advertisement bits allowed. */
6568 if (cmd->autoneg == AUTONEG_ENABLE &&
6569 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6570 ADVERTISED_1000baseT_Full |
6571 ADVERTISED_Autoneg |
/* Both locks taken: link_config is shared with the irq/timer paths. */
6576 spin_lock_irq(&tp->lock);
6577 spin_lock(&tp->tx_lock);
6579 tp->link_config.autoneg = cmd->autoneg;
6580 if (cmd->autoneg == AUTONEG_ENABLE) {
6581 tp->link_config.advertising = cmd->advertising;
/* Autoneg decides speed/duplex; mark the forced values invalid. */
6582 tp->link_config.speed = SPEED_INVALID;
6583 tp->link_config.duplex = DUPLEX_INVALID;
6585 tp->link_config.advertising = 0;
6586 tp->link_config.speed = cmd->speed;
6587 tp->link_config.duplex = cmd->duplex;
6590 tg3_setup_phy(tp, 1);
6591 spin_unlock(&tp->tx_lock);
6592 spin_unlock_irq(&tp->lock);
/* ethtool get_drvinfo handler: report driver name, version and the
 * PCI bus address of the device.
 */
6597 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6599 struct tg3 *tp = netdev_priv(dev);
6601 strcpy(info->driver, DRV_MODULE_NAME);
6602 strcpy(info->version, DRV_MODULE_VERSION);
6603 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol handler: only magic-packet wakeup is supported;
 * report whether it is currently enabled.  No SecureOn password.
 */
6606 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6608 struct tg3 *tp = netdev_priv(dev);
6610 wol->supported = WAKE_MAGIC;
6612 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6613 wol->wolopts = WAKE_MAGIC;
6614 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol handler: accept WAKE_MAGIC only, and reject it on
 * SERDES boards that lack WoL capability.  The flag update is done
 * under tp->lock.
 */
6617 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6619 struct tg3 *tp = netdev_priv(dev);
/* Any wake option other than magic packet is unsupported. */
6621 if (wol->wolopts & ~WAKE_MAGIC)
6623 if ((wol->wolopts & WAKE_MAGIC) &&
6624 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6625 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6628 spin_lock_irq(&tp->lock);
6629 if (wol->wolopts & WAKE_MAGIC)
6630 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6632 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6633 spin_unlock_irq(&tp->lock);
/* ethtool get_msglevel handler: return the driver's message-enable mask. */
6638 static u32 tg3_get_msglevel(struct net_device *dev)
6640 struct tg3 *tp = netdev_priv(dev);
6641 return tp->msg_enable;
/* ethtool set_msglevel handler: store the new message-enable mask. */
6644 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6646 struct tg3 *tp = netdev_priv(dev);
6647 tp->msg_enable = value;
6650 #if TG3_TSO_SUPPORT != 0
/* ethtool set_tso handler: reject the request on chips without TSO
 * capability, otherwise delegate to the generic ethtool helper.
 */
6651 static int tg3_set_tso(struct net_device *dev, u32 value)
6653 struct tg3 *tp = netdev_priv(dev);
6655 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6660 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset handler: if autonegotiation is enabled in BMCR,
 * restart it by setting BMCR_ANRESTART.  The double read of MII_BMCR
 * is deliberate — the first read flushes a possibly stale latched value.
 */
6664 static int tg3_nway_reset(struct net_device *dev)
6666 struct tg3 *tp = netdev_priv(dev);
6670 spin_lock_irq(&tp->lock);
6672 tg3_readphy(tp, MII_BMCR, &bmcr);
6673 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6674 (bmcr & BMCR_ANENABLE)) {
6675 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6678 spin_unlock_irq(&tp->lock);
/* ethtool get_ringparam handler: report hardware ring limits and the
 * currently configured RX/jumbo/TX ring occupancies.  No mini ring.
 */
6683 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6685 struct tg3 *tp = netdev_priv(dev);
6687 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6688 ering->rx_mini_max_pending = 0;
6689 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6691 ering->rx_pending = tp->rx_pending;
6692 ering->rx_mini_pending = 0;
6693 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6694 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam handler: bounds-check the requested ring sizes,
 * then update them under tp->lock + tp->tx_lock and restart the
 * interface so the new sizes take effect.
 */
6697 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6699 struct tg3 *tp = netdev_priv(dev);
6701 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6702 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6703 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6707 spin_lock_irq(&tp->lock);
6708 spin_lock(&tp->tx_lock);
6710 tp->rx_pending = ering->rx_pending;
/* Some chips cap the RX ring at 64 entries; clamp to 63 pending. */
6712 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6713 tp->rx_pending > 63)
6714 tp->rx_pending = 63;
6715 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6716 tp->tx_pending = ering->tx_pending;
6720 tg3_netif_start(tp);
6721 spin_unlock(&tp->tx_lock);
6722 spin_unlock_irq(&tp->lock);
/* ethtool get_pauseparam handler: translate the driver flag bits into
 * the autoneg/rx/tx pause booleans.
 */
6727 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6729 struct tg3 *tp = netdev_priv(dev);
6731 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6732 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6733 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam handler: set or clear the pause-autoneg and
 * rx/tx flow-control flag bits under tp->lock + tp->tx_lock, then
 * restart the interface so the new settings are applied.
 */
6736 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6738 struct tg3 *tp = netdev_priv(dev);
6741 spin_lock_irq(&tp->lock);
6742 spin_lock(&tp->tx_lock);
6743 if (epause->autoneg)
6744 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6746 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6747 if (epause->rx_pause)
6748 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6750 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6751 if (epause->tx_pause)
6752 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6754 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6757 tg3_netif_start(tp);
6758 spin_unlock(&tp->tx_lock);
6759 spin_unlock_irq(&tp->lock);
/* ethtool get_rx_csum handler: report whether RX checksum offload is on. */
6764 static u32 tg3_get_rx_csum(struct net_device *dev)
6766 struct tg3 *tp = netdev_priv(dev);
6767 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum handler: toggle the RX checksum flag under
 * tp->lock.  Chips with broken checksum hardware cannot enable it.
 */
6770 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6772 struct tg3 *tp = netdev_priv(dev);
6774 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6780 spin_lock_irq(&tp->lock);
6782 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6784 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6785 spin_unlock_irq(&tp->lock);
/* ethtool set_tx_csum handler: toggle NETIF_F_IP_CSUM in dev->features.
 * Chips with broken checksum hardware cannot enable it.
 */
6790 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6792 struct tg3 *tp = netdev_priv(dev);
6794 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6801 dev->features |= NETIF_F_IP_CSUM;
6803 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool get_stats_count handler: number of u64 statistics exported. */
6808 static int tg3_get_stats_count (struct net_device *dev)
6810 return TG3_NUM_STATS;
/* ethtool get_strings handler: copy the statistics key names into the
 * caller's buffer for ETH_SS_STATS; any other string set is a driver
 * bug, hence the WARN_ON.
 *
 * Fix: the source had "&ethtool_stats_keys" corrupted by an HTML-entity
 * mangling ("&eth" rendered as the U+00F0 character), which does not
 * compile; restored the intended address-of expression.
 */
6813 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6815 switch (stringset) {
6817 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6820 WARN_ON(1); /* we need a WARN() */
/* ethtool get_ethtool_stats handler: snapshot the accumulated hardware
 * statistics into the caller-supplied u64 array.
 */
6825 static void tg3_get_ethtool_stats (struct net_device *dev,
6826 struct ethtool_stats *estats, u64 *tmp_stats)
6828 struct tg3 *tp = netdev_priv(dev);
6829 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* Private ioctl handler: services the MII ioctls (get PHY address,
 * read/write a PHY register).  SERDES boards have no MDIO PHY, so the
 * register accesses are refused there.  PHY access is serialized with
 * tp->lock.  NOTE(review): the case labels are elided in this excerpt.
 */
6832 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6834 struct mii_ioctl_data *data = if_mii(ifr);
6835 struct tg3 *tp = netdev_priv(dev);
6840 data->phy_id = PHY_ADDR;
6846 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6847 break; /* We have no PHY */
6849 spin_lock_irq(&tp->lock);
6850 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6851 spin_unlock_irq(&tp->lock);
6853 data->val_out = mii_regval;
6859 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6860 break; /* We have no PHY */
/* Writing a PHY register requires admin capability. */
6862 if (!capable(CAP_NET_ADMIN))
6865 spin_lock_irq(&tp->lock);
6866 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6867 spin_unlock_irq(&tp->lock);
6878 #if TG3_VLAN_TAG_USED
/* VLAN group registration callback: record the group and refresh the
 * RX mode so the chip keeps/strips VLAN tags appropriately.
 */
6879 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6881 struct tg3 *tp = netdev_priv(dev);
6883 spin_lock_irq(&tp->lock);
6884 spin_lock(&tp->tx_lock);
6888 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6889 __tg3_set_rx_mode(dev);
6891 spin_unlock(&tp->tx_lock);
6892 spin_unlock_irq(&tp->lock);
/* VLAN id removal callback: drop the per-vid device pointer from the
 * registered vlan group, under both driver locks.
 */
6895 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6897 struct tg3 *tp = netdev_priv(dev);
6899 spin_lock_irq(&tp->lock);
6900 spin_lock(&tp->tx_lock);
6902 tp->vlgrp->vlan_devices[vid] = NULL;
6903 spin_unlock(&tp->tx_lock);
6904 spin_unlock_irq(&tp->lock);
/* ethtool operations table: wires the handlers above (plus a few generic
 * ethtool_op_* helpers) into the net core.  TSO entries are compiled in
 * only when TSO support is enabled.
 */
6908 static struct ethtool_ops tg3_ethtool_ops = {
6909 .get_settings = tg3_get_settings,
6910 .set_settings = tg3_set_settings,
6911 .get_drvinfo = tg3_get_drvinfo,
6912 .get_regs_len = tg3_get_regs_len,
6913 .get_regs = tg3_get_regs,
6914 .get_wol = tg3_get_wol,
6915 .set_wol = tg3_set_wol,
6916 .get_msglevel = tg3_get_msglevel,
6917 .set_msglevel = tg3_set_msglevel,
6918 .nway_reset = tg3_nway_reset,
6919 .get_link = ethtool_op_get_link,
6920 .get_eeprom_len = tg3_get_eeprom_len,
6921 .get_eeprom = tg3_get_eeprom,
6922 .set_eeprom = tg3_set_eeprom,
6923 .get_ringparam = tg3_get_ringparam,
6924 .set_ringparam = tg3_set_ringparam,
6925 .get_pauseparam = tg3_get_pauseparam,
6926 .set_pauseparam = tg3_set_pauseparam,
6927 .get_rx_csum = tg3_get_rx_csum,
6928 .set_rx_csum = tg3_set_rx_csum,
6929 .get_tx_csum = ethtool_op_get_tx_csum,
6930 .set_tx_csum = tg3_set_tx_csum,
6931 .get_sg = ethtool_op_get_sg,
6932 .set_sg = ethtool_op_set_sg,
6933 #if TG3_TSO_SUPPORT != 0
6934 .get_tso = ethtool_op_get_tso,
6935 .set_tso = tg3_set_tso,
6937 .get_strings = tg3_get_strings,
6938 .get_stats_count = tg3_get_stats_count,
6939 .get_ethtool_stats = tg3_get_ethtool_stats,
/* Probe the size of a seeprom part: read at power-of-two offsets until
 * the magic signature reappears, which means the address wrapped and
 * the previous offset was the chip size.  Falls back to the default
 * EEPROM_CHIP_SIZE if probing fails.
 */
6942 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6946 tp->nvram_size = EEPROM_CHIP_SIZE;
6948 if (tg3_nvram_read(tp, 0, &val) != 0)
/* Offset 0 must hold the (byte-swapped) magic for this probe to work. */
6951 if (swab32(val) != TG3_EEPROM_MAGIC)
6955 * Size the chip by reading offsets at increasing powers of two.
6956 * When we encounter our validation signature, we know the addressing
6957 * has wrapped around, and thus have our chip size.
6961 while (cursize < tp->nvram_size) {
6962 if (tg3_nvram_read(tp, cursize, &val) != 0)
6965 if (swab32(val) == TG3_EEPROM_MAGIC)
6971 tp->nvram_size = cursize;
/* Read the NVRAM size from the size word at offset 0xf0 (upper 16 bits,
 * in KiB).  Falls back to 128KB (0x20000) when unavailable.
 */
6974 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
6978 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
6980 tp->nvram_size = (val >> 16) * 1024;
6984 tp->nvram_size = 0x20000;
/* Decode NVRAM_CFG1 to discover the attached flash/eeprom part: set the
 * FLASH flag, disable compatibility bypass, and on 5750-class chips map
 * the vendor field to JEDEC id, page size, and the buffered-NVRAM flag.
 * Non-5750 chips default to a buffered Atmel AT45DB0X1B.
 */
6987 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
6991 nvcfg1 = tr32(NVRAM_CFG1);
6992 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6993 tp->tg3_flags2 |= TG3_FLG2_FLASH;
6996 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6997 tw32(NVRAM_CFG1, nvcfg1);
7000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7001 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7002 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7003 tp->nvram_jedecnum = JEDEC_ATMEL;
7004 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7005 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7007 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7008 tp->nvram_jedecnum = JEDEC_ATMEL;
7009 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7011 case FLASH_VENDOR_ATMEL_EEPROM:
7012 tp->nvram_jedecnum = JEDEC_ATMEL;
7013 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7014 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7016 case FLASH_VENDOR_ST:
7017 tp->nvram_jedecnum = JEDEC_ST;
7018 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7019 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7021 case FLASH_VENDOR_SAIFUN:
7022 tp->nvram_jedecnum = JEDEC_SAIFUN;
7023 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7025 case FLASH_VENDOR_SST_SMALL:
7026 case FLASH_VENDOR_SST_LARGE:
7027 tp->nvram_jedecnum = JEDEC_SST;
7028 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Default (non-5750): assume buffered Atmel AT45DB0X1B. */
7033 tp->nvram_jedecnum = JEDEC_ATMEL;
7034 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7035 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7039 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* One-time NVRAM setup at probe: reset the seeprom FSM, enable seeprom
 * access, and on chips with real NVRAM (not 5700/5701) temporarily
 * enable NVRAM access to discover the part type and size.  Sun 570X
 * boards have no usable NVRAM and are skipped entirely.
 */
7040 static void __devinit tg3_nvram_init(struct tg3 *tp)
7044 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7047 tw32_f(GRC_EEPROM_ADDR,
7048 (EEPROM_ADDR_FSM_RESET |
7049 (EEPROM_DEFAULT_CLOCK_PERIOD <<
7050 EEPROM_ADDR_CLKPERD_SHIFT)));
7052 /* XXX schedule_timeout() ... */
7053 for (j = 0; j < 100; j++)
7056 /* Enable seeprom accesses. */
7057 tw32_f(GRC_LOCAL_CTRL,
7058 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7061 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7062 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7063 tp->tg3_flags |= TG3_FLAG_NVRAM;
/* 5750 gates NVRAM behind an access-enable bit; set it for probing. */
7065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7066 u32 nvaccess = tr32(NVRAM_ACCESS);
7068 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7071 tg3_get_nvram_info(tp);
7072 tg3_get_nvram_size(tp);
7074 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7075 u32 nvaccess = tr32(NVRAM_ACCESS);
7077 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* 5700/5701: plain seeprom only — probe its size instead. */
7081 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7083 tg3_get_eeprom_size(tp);
/* Read one 32-bit word from the seeprom via the GRC_EEPROM_ADDR state
 * machine: program address + READ + START, poll for COMPLETE (bounded
 * loop), then fetch the word from GRC_EEPROM_DATA.
 */
7087 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7088 u32 offset, u32 *val)
7093 if (offset > EEPROM_ADDR_ADDR_MASK ||
7097 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7098 EEPROM_ADDR_DEVID_MASK |
7100 tw32(GRC_EEPROM_ADDR,
7102 (0 << EEPROM_ADDR_DEVID_SHIFT) |
7103 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7104 EEPROM_ADDR_ADDR_MASK) |
7105 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Busy-wait for the FSM to signal completion (bounded at 10000 polls). */
7107 for (i = 0; i < 10000; i++) {
7108 tmp = tr32(GRC_EEPROM_ADDR);
7110 if (tmp & EEPROM_ADDR_COMPLETE)
7114 if (!(tmp & EEPROM_ADDR_COMPLETE))
7117 *val = tr32(GRC_EEPROM_DATA);
/* Maximum number of polls of NVRAM_CMD before declaring a timeout. */
7121 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll NVRAM_CMD until DONE, bounded by
 * NVRAM_CMD_TIMEOUT iterations.  Timing out is the error path.
 */
7123 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7127 tw32(NVRAM_CMD, nvram_cmd);
7128 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7130 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7135 if (i == NVRAM_CMD_TIMEOUT) {
/* Read one 32-bit word from NVRAM at 'offset'.  Sun 570X boards have no
 * NVRAM (hard error); chips without the NVRAM flag fall back to the
 * seeprom path.  Buffered Atmel flash needs the linear offset translated
 * into a (page, byte-in-page) address.  5750 additionally requires the
 * ACCESS_ENABLE bit around the transaction.  Result is byte-swapped
 * from the chip's big-endian data register.
 */
7141 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7145 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7146 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7150 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7151 return tg3_nvram_read_using_eeprom(tp, offset, val);
7153 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7154 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7155 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
/* Convert linear offset to the AT45DB0X1B page/byte address format. */
7157 offset = ((offset / tp->nvram_pagesize) <<
7158 ATMEL_AT45DB0X1B_PAGE_POS) +
7159 (offset % tp->nvram_pagesize);
7162 if (offset > NVRAM_ADDR_MSK)
7167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7168 u32 nvaccess = tr32(NVRAM_ACCESS);
7170 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7173 tw32(NVRAM_ADDR, offset);
7174 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7175 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7178 *val = swab32(tr32(NVRAM_RDDATA));
7180 tg3_nvram_unlock(tp);
7182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7183 u32 nvaccess = tr32(NVRAM_ACCESS);
7185 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Write a dword-aligned block through the seeprom FSM, one 32-bit word
 * at a time: load GRC_EEPROM_DATA, ack any stale COMPLETE bit, program
 * the address + START, then poll for COMPLETE (bounded loop).
 */
7191 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7192 u32 offset, u32 len, u8 *buf)
7197 for (i = 0; i < len; i += 4) {
7202 memcpy(&data, buf + i, 4);
7204 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7206 val = tr32(GRC_EEPROM_ADDR);
/* Write-1-to-clear any previous COMPLETE indication. */
7207 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7209 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7211 tw32(GRC_EEPROM_ADDR, val |
7212 (0 << EEPROM_ADDR_DEVID_SHIFT) |
7213 (addr & EEPROM_ADDR_ADDR_MASK) |
7217 for (j = 0; j < 10000; j++) {
7218 val = tr32(GRC_EEPROM_ADDR);
7220 if (val & EEPROM_ADDR_COMPLETE)
7224 if (!(val & EEPROM_ADDR_COMPLETE)) {
7233 /* offset and length are dword aligned */
/* Write to unbuffered flash using a read-modify-erase-write cycle per
 * page: read the whole page into a kmalloc'd shadow, merge the caller's
 * bytes, issue WREN + page ERASE, WREN again, then stream the page back
 * word by word (FIRST on word 0, LAST on the final word), finishing
 * with WRDI to drop write-enable.
 */
7234 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7238 u32 pagesize = tp->nvram_pagesize;
7239 u32 pagemask = pagesize - 1;
7243 tmp = kmalloc(pagesize, GFP_KERNEL);
7249 u32 phy_addr, page_off, size, nvaccess;
7251 phy_addr = offset & ~pagemask;
/* Read back the full page so untouched bytes are preserved. */
7253 for (j = 0; j < pagesize; j += 4) {
7254 if ((ret = tg3_nvram_read(tp, phy_addr + j,
7255 (u32 *) (tmp + j))))
7261 page_off = offset & pagemask;
7268 memcpy(tmp + page_off, buf, size);
7270 offset = offset + (pagesize - page_off);
7272 nvaccess = tr32(NVRAM_ACCESS);
7273 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7276 * Before we can erase the flash page, we need
7277 * to issue a special "write enable" command.
7279 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7281 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7284 /* Erase the target page */
7285 tw32(NVRAM_ADDR, phy_addr);
7287 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7288 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7290 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7293 /* Issue another write enable to start the write. */
7294 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7296 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7299 for (j = 0; j < pagesize; j += 4) {
7302 data = *((u32 *) (tmp + j));
7303 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7305 tw32(NVRAM_ADDR, phy_addr + j);
7307 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7311 nvram_cmd |= NVRAM_CMD_FIRST;
7312 else if (j == (pagesize - 4))
7313 nvram_cmd |= NVRAM_CMD_LAST;
7315 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* All pages written: clear write-enable again. */
7322 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7323 tg3_nvram_exec_cmd(tp, nvram_cmd);
7330 /* offset and length are dword aligned */
/* Write to buffered flash / eeprom one 32-bit word at a time: no page
 * erase needed.  FIRST/LAST command flags mark page boundaries; Atmel
 * buffered parts use the translated page/byte address; ST parts need a
 * WREN before the first word of each page; plain eeprom always gets
 * FIRST|LAST (complete word writes).
 */
7331 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7336 for (i = 0; i < len; i += 4, offset += 4) {
7337 u32 data, page_off, phy_addr, nvram_cmd;
7339 memcpy(&data, buf + i, 4);
7340 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7342 page_off = offset % tp->nvram_pagesize;
7344 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7345 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
/* AT45DB0X1B: linear offset -> (page << POS) + byte-in-page. */
7347 phy_addr = ((offset / tp->nvram_pagesize) <<
7348 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7354 tw32(NVRAM_ADDR, phy_addr);
7356 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7358 if ((page_off == 0) || (i == 0))
7359 nvram_cmd |= NVRAM_CMD_FIRST;
7360 else if (page_off == (tp->nvram_pagesize - 4))
7361 nvram_cmd |= NVRAM_CMD_LAST;
7364 nvram_cmd |= NVRAM_CMD_LAST;
7366 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7367 (nvram_cmd & NVRAM_CMD_FIRST)) {
7369 if ((ret = tg3_nvram_exec_cmd(tp,
7370 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7375 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7376 /* We always do complete word writes to eeprom. */
7377 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7380 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7386 /* offset and length are dword aligned */
/* Top-level NVRAM write dispatcher: refuse on Sun 570X, drop the
 * hardware write-protect GPIO if set, route to the seeprom path or to
 * the buffered/unbuffered flash writer as appropriate (with 5750
 * ACCESS_ENABLE and GRC write-enable bracketing), then restore write
 * protection.
 */
7387 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7391 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7392 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
/* Deassert the write-protect GPIO for the duration of the write. */
7396 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7397 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7398 GRC_LCLCTRL_GPIO_OE1);
7402 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7403 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7410 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7411 u32 nvaccess = tr32(NVRAM_ACCESS);
7413 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7415 tw32(NVRAM_WRITE1, 0x406);
7418 grc_mode = tr32(GRC_MODE);
7419 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7421 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7422 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7424 ret = tg3_nvram_write_block_buffered(tp, offset, len,
7428 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7432 grc_mode = tr32(GRC_MODE);
7433 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7436 u32 nvaccess = tr32(NVRAM_ACCESS);
7438 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7440 tg3_nvram_unlock(tp);
/* Re-assert write protection on the way out. */
7443 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7444 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7445 GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
/* Maps a PCI subsystem (vendor, device) pair to the PHY chip known to
 * be on that board; used when the eeprom carries no PHY id.
 */
7452 struct subsys_tbl_ent {
7453 u16 subsys_vendor, subsys_devid;
/* Hardcoded board -> PHY table, consulted by lookup_by_subsys() when no
 * eeprom signature is found.  A phy_id of 0 marks SERDES (fiber) boards.
 */
7457 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7458 /* Broadcom boards. */
7459 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7460 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7461 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7462 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
7463 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7464 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7465 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
7466 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7467 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7468 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7469 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7472 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7473 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7474 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
7475 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7476 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7479 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7480 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7481 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7482 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7484 /* Compaq boards. */
7485 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7486 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7487 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
7488 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7489 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7492 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Linear search of subsys_id_to_phy_id for this device's PCI subsystem
 * vendor/device pair; returns the matching entry or falls through
 * (NULL) when the board is unknown.
 */
7495 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7499 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7500 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7501 tp->pdev->subsystem_vendor) &&
7502 (subsys_id_to_phy_id[i].subsys_devid ==
7503 tp->pdev->subsystem_device))
7504 return &subsys_id_to_phy_id[i];
/* Probe and configure the PHY at device-init time.  In order:
 *  1. Parse the NIC SRAM config signature block (if present) for the
 *     eeprom-provided PHY id, SERDES flag, LED mode, ASF/WoL/write-
 *     protect flags and misc. PHY quirk bits.
 *  2. Read the physical PHY id over MDIO (skipped when ASF firmware
 *     owns the PHY) and prefer it when recognized; otherwise fall back
 *     to the eeprom id, then to the hardcoded subsystem-id table.
 *  3. For copper PHYs without ASF: reset the PHY if link is down and
 *     (re)program full 10/100(/1000) advertisement, then restart
 *     autoneg and enable wirespeed.
 *  4. Apply BCM5401 DSP init and derive the default link_config
 *     advertisement mask.
 */
7509 static int __devinit tg3_phy_probe(struct tg3 *tp)
7511 u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7512 u32 hw_phy_id, hw_phy_id_masked;
7514 int eeprom_signature_found, eeprom_phy_serdes, err;
7516 tp->phy_id = PHY_ID_INVALID;
7517 eeprom_phy_id = PHY_ID_INVALID;
7518 eeprom_phy_serdes = 0;
7519 eeprom_signature_found = 0;
7520 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7521 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7522 u32 nic_cfg, led_cfg;
7523 u32 nic_phy_id, ver, cfg2 = 0;
7525 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7526 tp->nic_sram_data_cfg = nic_cfg;
7528 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7529 ver >>= NIC_SRAM_DATA_VER_SHIFT;
/* CFG_2 word only exists on newer chips/bootcode versions. */
7530 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7531 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7532 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7533 (ver > 0) && (ver < 0x100))
7534 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7536 eeprom_signature_found = 1;
7538 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7539 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7540 eeprom_phy_serdes = 1;
7542 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7543 if (nic_phy_id != 0) {
7544 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7545 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
/* Repack the SRAM id fields into the driver's PHY id layout. */
7547 eeprom_phy_id = (id1 >> 16) << 10;
7548 eeprom_phy_id |= (id2 & 0xfc00) << 16;
7549 eeprom_phy_id |= (id2 & 0x03ff) << 0;
/* 5750 ("Shasta") keeps the LED mode in CFG_2 with extended modes. */
7553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7554 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7555 SHASTA_EXT_LED_MODE_MASK);
7557 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7561 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7562 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7565 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7566 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7569 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7570 tp->led_ctrl = LED_CTRL_MODE_MAC;
7573 case SHASTA_EXT_LED_SHARED:
7574 tp->led_ctrl = LED_CTRL_MODE_SHARED;
/* 5750 A0/A1 lack the extra PHY LED bits; others need them set. */
7575 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7576 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7577 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7578 LED_CTRL_MODE_PHY_2);
7581 case SHASTA_EXT_LED_MAC:
7582 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7585 case SHASTA_EXT_LED_COMBO:
7586 tp->led_ctrl = LED_CTRL_MODE_COMBO;
7587 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7588 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7589 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards always use PHY_2 LED mode. */
7594 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7595 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7596 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7597 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7599 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7600 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7601 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7602 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7604 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7605 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7607 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7609 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7610 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7612 if (cfg2 & (1 << 17))
7613 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7615 /* serdes signal pre-emphasis in register 0x590 set by */
7616 /* bootcode if bit 18 is set */
7617 if (cfg2 & (1 << 18))
7618 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7621 /* Reading the PHY ID register can conflict with ASF
7622 * firwmare access to the PHY hardware.
7625 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7626 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7628 /* Now read the physical PHY_ID from the chip and verify
7629 * that it is sane. If it doesn't look good, we fall back
7630 * to either the hard-coded table based PHY_ID and failing
7631 * that the value found in the eeprom area.
7633 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7634 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7636 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
7637 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7638 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
7640 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7643 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7644 tp->phy_id = hw_phy_id;
7645 if (hw_phy_id_masked == PHY_ID_BCM8002)
7646 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7648 if (eeprom_signature_found) {
7649 tp->phy_id = eeprom_phy_id;
7650 if (eeprom_phy_serdes)
7651 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7653 struct subsys_tbl_ent *p;
7655 /* No eeprom signature? Try the hardcoded
7656 * subsys device table.
7658 p = lookup_by_subsys(tp);
7662 tp->phy_id = p->phy_id;
7664 tp->phy_id == PHY_ID_BCM8002)
7665 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY, not owned by ASF firmware: bring up advertisement. */
7669 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7670 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7671 u32 bmsr, adv_reg, tg3_ctrl;
/* Double read: first read flushes the latched link-status bit. */
7673 tg3_readphy(tp, MII_BMSR, &bmsr);
7674 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7675 (bmsr & BMSR_LSTATUS))
7676 goto skip_phy_reset;
7678 err = tg3_phy_reset(tp);
7682 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7683 ADVERTISE_100HALF | ADVERTISE_100FULL |
7684 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7686 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7687 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7688 MII_TG3_CTRL_ADV_1000_FULL);
/* Early 5701 revs must run as master to work around a hw bug. */
7689 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7690 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7691 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7692 MII_TG3_CTRL_ENABLE_AS_MASTER);
7695 if (!tg3_copper_is_advertising_all(tp)) {
7696 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7698 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7699 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7701 tg3_writephy(tp, MII_BMCR,
7702 BMCR_ANENABLE | BMCR_ANRESTART);
7704 tg3_phy_set_wirespeed(tp);
7706 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7707 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7708 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7712 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7713 err = tg3_init_5401phy_dsp(tp);
7718 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7719 err = tg3_init_5401phy_dsp(tp);
7722 if (!eeprom_signature_found)
7723 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7725 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7726 tp->link_config.advertising =
7727 (ADVERTISED_1000baseT_Half |
7728 ADVERTISED_1000baseT_Full |
7729 ADVERTISED_Autoneg |
7731 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7732 tp->link_config.advertising &=
7733 ~(ADVERTISED_1000baseT_Half |
7734 ADVERTISED_1000baseT_Full);
/* Read the board part number out of the VPD area at NVRAM offset 0x100:
 * pull 256 bytes word-by-word, then walk the VPD resource tags looking
 * for the read-only section's "PN" keyword.  Sun 570X has no NVRAM and
 * gets a fixed string; "none" is used when no part number is found.
 */
7739 static void __devinit tg3_read_partno(struct tg3 *tp)
7741 unsigned char vpd_data[256];
7744 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7745 /* Sun decided not to put the necessary bits in the
7746 * NVRAM of their onboard tg3 parts :(
7748 strcpy(tp->board_part_number, "Sun 570X");
/* Copy the VPD image 4 bytes at a time, little-endian byte order. */
7752 for (i = 0; i < 256; i += 4) {
7755 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7758 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
7759 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
7760 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7761 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7764 /* Now parse and find the part number. */
7765 for (i = 0; i < 256; ) {
7766 unsigned char val = vpd_data[i];
/* 0x82 = identifier-string tag, 0x91 = read-write tag: skip over. */
7769 if (val == 0x82 || val == 0x91) {
7772 (vpd_data[i + 2] << 8)));
7779 block_end = (i + 3 +
7781 (vpd_data[i + 2] << 8)));
7783 while (i < block_end) {
7784 if (vpd_data[i + 0] == 'P' &&
7785 vpd_data[i + 1] == 'N') {
7786 int partno_len = vpd_data[i + 2];
/* Part-number field is capped at 24 characters. */
7788 if (partno_len > 24)
7791 memcpy(tp->board_part_number,
7800 /* Part number not found. */
7805 strcpy(tp->board_part_number, "none");
7808 #ifdef CONFIG_SPARC64
/* SPARC64 only: detect Sun onboard 570X parts by reading the
 * "subsystem-vendor-id" property from the OpenBoot PROM node and
 * checking it against Sun's PCI vendor id.
 */
7809 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7811 struct pci_dev *pdev = tp->pdev;
7812 struct pcidev_cookie *pcp = pdev->sysdata;
7815 int node = pcp->prom_node;
7819 err = prom_getproperty(node, "subsystem-vendor-id",
7820 (char *) &venid, sizeof(venid));
/* prom_getproperty returns 0 or -1 on failure. */
7821 if (err == 0 || err == -1)
7823 if (venid == PCI_VENDOR_ID_SUN)
7830 static int __devinit tg3_get_invariants(struct tg3 *tp)
7833 u32 cacheline_sz_reg;
7834 u32 pci_state_reg, grc_misc_cfg;
7839 #ifdef CONFIG_SPARC64
7840 if (tg3_is_sun_570X(tp))
7841 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7844 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7845 * reordering to the mailbox registers done by the host
7846 * controller can cause major troubles. We read back from
7847 * every mailbox register write to force the writes to be
7848 * posted to the chip in order.
7850 if (pci_find_device(PCI_VENDOR_ID_INTEL,
7851 PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7852 pci_find_device(PCI_VENDOR_ID_INTEL,
7853 PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7854 pci_find_device(PCI_VENDOR_ID_INTEL,
7855 PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7856 pci_find_device(PCI_VENDOR_ID_INTEL,
7857 PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7858 pci_find_device(PCI_VENDOR_ID_AMD,
7859 PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7860 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7862 /* Force memory write invalidate off. If we leave it on,
7863 * then on 5700_BX chips we have to enable a workaround.
7864 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7865 * to match the cacheline size. The Broadcom driver have this
7866 * workaround but turns MWI off all the times so never uses
7867 * it. This seems to suggest that the workaround is insufficient.
7869 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7870 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7871 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7873 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7874 * has the register indirect write enable bit set before
7875 * we try to access any of the MMIO registers. It is also
7876 * critical that the PCI-X hw workaround situation is decided
7877 * before that as well.
7879 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7882 tp->pci_chip_rev_id = (misc_ctrl_reg >>
7883 MISC_HOST_CTRL_CHIPREV_SHIFT);
7885 /* Initialize misc host control in PCI block. */
7886 tp->misc_host_ctrl |= (misc_ctrl_reg &
7887 MISC_HOST_CTRL_CHIPREV);
7888 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7889 tp->misc_host_ctrl);
7891 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7894 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
7895 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
7896 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
7897 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
7899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7900 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7902 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7903 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7906 tp->pci_lat_timer < 64) {
7907 tp->pci_lat_timer = 64;
7909 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
7910 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
7911 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
7912 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
7914 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7918 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7921 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7922 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7924 /* If this is a 5700 BX chipset, and we are in PCI-X
7925 * mode, enable register write workaround.
7927 * The workaround is to use indirect register accesses
7928 * for all chip writes not to mailbox registers.
7930 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7934 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7936 /* The chip can have its power management PCI config
7937 * space registers clobbered due to this bug.
7938 * So explicitly force the chip into D0 here.
7940 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7942 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7943 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7944 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7947 /* Also, force SERR#/PERR# in PCI command. */
7948 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7949 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7950 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7954 /* Back to back register writes can cause problems on this chip,
7955 * the workaround is to read back all reg writes except those to
7956 * mailbox regs. See tg3_write_indirect_reg32().
7958 * PCI Express 5750_A0 rev chips need this workaround too.
7960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7961 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7962 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7963 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7965 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7966 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7967 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7968 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7970 /* Chip-specific fixup from Broadcom driver */
7971 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7972 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7973 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7974 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7977 /* Force the chip into D0. */
7978 err = tg3_set_power_state(tp, 0);
7980 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7981 pci_name(tp->pdev));
7985 /* 5700 B0 chips do not support checksumming correctly due
7988 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7989 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7991 /* Pseudo-header checksum is done by hardware logic and not
7992 * the offload processors, so make the chip do the pseudo-
7993 * header checksums on receive. For transmit it is more
7994 * convenient to do the pseudo-header checksum in software
7995 * as Linux does that on transmit for us in all cases.
7997 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7998 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8000 /* Derive initial jumbo mode from MTU assigned in
8001 * ether_setup() via the alloc_etherdev() call
8003 if (tp->dev->mtu > ETH_DATA_LEN)
8004 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8006 /* Determine WakeOnLan speed to use. */
8007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8008 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8009 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8010 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8011 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8013 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8016 /* A few boards don't want Ethernet@WireSpeed phy feature */
8017 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8018 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8019 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8020 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8021 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8023 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8024 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8025 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8026 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8027 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8031 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8033 /* Only 5701 and later support tagged irq status mode.
8034 * Also, 5788 chips cannot use tagged irq status.
8036 * However, since we are using NAPI avoid tagged irq status
8037 * because the interrupt condition is more difficult to
8038 * fully clear in that mode.
8040 tp->coalesce_mode = 0;
8042 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8043 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8044 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8046 /* Initialize MAC MI mode, polling disabled. */
8047 tw32_f(MAC_MI_MODE, tp->mi_mode);
8050 /* Initialize data/descriptor byte/word swapping. */
8051 val = tr32(GRC_MODE);
8052 val &= GRC_MODE_HOST_STACKUP;
8053 tw32(GRC_MODE, val | tp->grc_mode);
8055 tg3_switch_clocks(tp);
8057 /* Clear this out for sanity. */
8058 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8060 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8062 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8063 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8064 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8066 if (chiprevid == CHIPREV_ID_5701_A0 ||
8067 chiprevid == CHIPREV_ID_5701_B0 ||
8068 chiprevid == CHIPREV_ID_5701_B2 ||
8069 chiprevid == CHIPREV_ID_5701_B5) {
8070 void __iomem *sram_base;
8072 /* Write some dummy words into the SRAM status block
8073 * area, see if it reads back correctly. If the return
8074 * value is bad, force enable the PCIX workaround.
8076 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8078 writel(0x00000000, sram_base);
8079 writel(0x00000000, sram_base + 4);
8080 writel(0xffffffff, sram_base + 4);
8081 if (readl(sram_base) != 0x00000000)
8082 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8089 grc_misc_cfg = tr32(GRC_MISC_CFG);
8090 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8092 /* Broadcom's driver says that CIOBE multisplit has a bug */
8094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8095 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8096 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8097 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8101 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8102 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8103 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8105 /* these are limited to 10/100 only */
8106 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8107 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8108 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8109 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8110 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8111 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8112 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8113 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8114 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8115 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8116 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8118 err = tg3_phy_probe(tp);
8120 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8121 pci_name(tp->pdev), err);
8122 /* ... but do not return immediately ... */
8125 tg3_read_partno(tp);
8127 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8128 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8131 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8133 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8136 /* 5700 {AX,BX} chips have a broken status block link
8137 * change bit implementation, so we must use the
8138 * status register in those cases.
8140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8141 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8143 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8145 /* The led_ctrl is set during tg3_phy_probe, here we might
8146 * have to force the link status polling mechanism based
8147 * upon subsystem IDs.
8149 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8150 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8151 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8152 TG3_FLAG_USE_LINKCHG_REG);
8155 /* For all SERDES we poll the MAC status register. */
8156 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8157 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8159 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8161 /* 5700 BX chips need to have their TX producer index mailboxes
8162 * written twice to workaround a bug.
8164 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8165 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8167 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8169 /* It seems all chips can get confused if TX buffers
8170 * straddle the 4GB address boundary in some cases.
8172 tp->dev->hard_start_xmit = tg3_start_xmit;
8175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8176 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8179 /* By default, disable wake-on-lan. User can change this
8180 * using ETHTOOL_SWOL.
8182 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8187 #ifdef CONFIG_SPARC64
8188 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8190 struct net_device *dev = tp->dev;
8191 struct pci_dev *pdev = tp->pdev;
8192 struct pcidev_cookie *pcp = pdev->sysdata;
8195 int node = pcp->prom_node;
8197 if (prom_getproplen(node, "local-mac-address") == 6) {
8198 prom_getproperty(node, "local-mac-address",
8206 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8208 struct net_device *dev = tp->dev;
8210 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8215 static int __devinit tg3_get_device_address(struct tg3 *tp)
8217 struct net_device *dev = tp->dev;
8218 u32 hi, lo, mac_offset;
8220 #ifdef CONFIG_SPARC64
8221 if (!tg3_get_macaddr_sparc(tp))
8226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8227 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
8228 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8230 if (tg3_nvram_lock(tp))
8231 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8233 tg3_nvram_unlock(tp);
8236 /* First try to get it from MAC address mailbox. */
8237 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8238 if ((hi >> 16) == 0x484b) {
8239 dev->dev_addr[0] = (hi >> 8) & 0xff;
8240 dev->dev_addr[1] = (hi >> 0) & 0xff;
8242 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8243 dev->dev_addr[2] = (lo >> 24) & 0xff;
8244 dev->dev_addr[3] = (lo >> 16) & 0xff;
8245 dev->dev_addr[4] = (lo >> 8) & 0xff;
8246 dev->dev_addr[5] = (lo >> 0) & 0xff;
8248 /* Next, try NVRAM. */
8249 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
8250 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8251 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8252 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8253 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8254 dev->dev_addr[2] = ((lo >> 0) & 0xff);
8255 dev->dev_addr[3] = ((lo >> 8) & 0xff);
8256 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8257 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8259 /* Finally just fetch it out of the MAC control regs. */
8261 hi = tr32(MAC_ADDR_0_HIGH);
8262 lo = tr32(MAC_ADDR_0_LOW);
8264 dev->dev_addr[5] = lo & 0xff;
8265 dev->dev_addr[4] = (lo >> 8) & 0xff;
8266 dev->dev_addr[3] = (lo >> 16) & 0xff;
8267 dev->dev_addr[2] = (lo >> 24) & 0xff;
8268 dev->dev_addr[1] = hi & 0xff;
8269 dev->dev_addr[0] = (hi >> 8) & 0xff;
8272 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8273 #ifdef CONFIG_SPARC64
8274 if (!tg3_get_default_macaddr_sparc(tp))
8282 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8284 struct tg3_internal_buffer_desc test_desc;
8288 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8290 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8291 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8292 tw32(RDMAC_STATUS, 0);
8293 tw32(WDMAC_STATUS, 0);
8295 tw32(BUFMGR_MODE, 0);
8298 test_desc.addr_hi = ((u64) buf_dma) >> 32;
8299 test_desc.addr_lo = buf_dma & 0xffffffff;
8300 test_desc.nic_mbuf = 0x00002100;
8301 test_desc.len = size;
8304 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
8305 * the *second* time the tg3 driver was getting loaded after an
8308 * Broadcom tells me:
8309 * ...the DMA engine is connected to the GRC block and a DMA
8310 * reset may affect the GRC block in some unpredictable way...
8311 * The behavior of resets to individual blocks has not been tested.
8313 * Broadcom noted the GRC reset will also reset all sub-components.
8316 test_desc.cqid_sqid = (13 << 8) | 2;
8318 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8321 test_desc.cqid_sqid = (16 << 8) | 7;
8323 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8326 test_desc.flags = 0x00000005;
8328 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8331 val = *(((u32 *)&test_desc) + i);
8332 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8333 sram_dma_descs + (i * sizeof(u32)));
8334 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8336 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8339 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8341 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8345 for (i = 0; i < 40; i++) {
8349 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8351 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8352 if ((val & 0xffff) == sram_dma_descs) {
8363 #define TEST_BUFFER_SIZE 0x400
8365 static int __devinit tg3_test_dma(struct tg3 *tp)
8371 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8377 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8378 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8384 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8387 cacheline_size = 1024;
8389 cacheline_size = (int) byte * 4;
8391 switch (cacheline_size) {
8396 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8397 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8399 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8401 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8403 ~(DMA_RWCTRL_PCI_WRITE_CMD);
8405 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8410 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8411 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8413 DMA_RWCTRL_WRITE_BNDRY_256;
8414 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8416 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8421 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8422 /* DMA read watermark not used on PCIE */
8423 tp->dma_rwctrl |= 0x00180000;
8424 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8426 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8427 tp->dma_rwctrl |= 0x003f0000;
8429 tp->dma_rwctrl |= 0x003f000f;
8431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8433 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8435 if (ccval == 0x6 || ccval == 0x7)
8436 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8438 /* Set bit 23 to re-enable PCIX hw bug fix */
8439 tp->dma_rwctrl |= 0x009f0000;
8441 tp->dma_rwctrl |= 0x001b000f;
8445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8447 tp->dma_rwctrl &= 0xfffffff0;
8449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8451 /* Remove this if it causes problems for some boards. */
8452 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8454 /* On 5700/5701 chips, we need to set this bit.
8455 * Otherwise the chip will issue cacheline transactions
8456 * to streamable DMA memory with not all the byte
8457 * enables turned on. This is an error on several
8458 * RISC PCI controllers, in particular sparc64.
8460 * On 5703/5704 chips, this bit has been reassigned
8461 * a different meaning. In particular, it is used
8462 * on those chips to enable a PCI-X workaround.
8464 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8467 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8470 /* Unneeded, already done by tg3_get_invariants. */
8471 tg3_switch_clocks(tp);
8475 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8476 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8482 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8485 /* Send the buffer to the chip. */
8486 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8488 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8493 /* validate data reached card RAM correctly. */
8494 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8496 tg3_read_mem(tp, 0x2100 + (i*4), &val);
8497 if (le32_to_cpu(val) != p[i]) {
8498 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
8499 /* ret = -ENODEV here? */
8504 /* Now read it back. */
8505 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8507 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8513 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8517 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8518 DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8519 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8520 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8523 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8529 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8537 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8542 static void __devinit tg3_init_link_config(struct tg3 *tp)
8544 tp->link_config.advertising =
8545 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8546 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8547 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8548 ADVERTISED_Autoneg | ADVERTISED_MII);
8549 tp->link_config.speed = SPEED_INVALID;
8550 tp->link_config.duplex = DUPLEX_INVALID;
8551 tp->link_config.autoneg = AUTONEG_ENABLE;
8552 netif_carrier_off(tp->dev);
8553 tp->link_config.active_speed = SPEED_INVALID;
8554 tp->link_config.active_duplex = DUPLEX_INVALID;
8555 tp->link_config.phy_is_low_power = 0;
8556 tp->link_config.orig_speed = SPEED_INVALID;
8557 tp->link_config.orig_duplex = DUPLEX_INVALID;
8558 tp->link_config.orig_autoneg = AUTONEG_INVALID;
8561 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8563 tp->bufmgr_config.mbuf_read_dma_low_water =
8564 DEFAULT_MB_RDMA_LOW_WATER;
8565 tp->bufmgr_config.mbuf_mac_rx_low_water =
8566 DEFAULT_MB_MACRX_LOW_WATER;
8567 tp->bufmgr_config.mbuf_high_water =
8568 DEFAULT_MB_HIGH_WATER;
8570 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8571 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8572 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8573 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8574 tp->bufmgr_config.mbuf_high_water_jumbo =
8575 DEFAULT_MB_HIGH_WATER_JUMBO;
8577 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8578 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8581 static char * __devinit tg3_phy_string(struct tg3 *tp)
8583 switch (tp->phy_id & PHY_ID_MASK) {
8584 case PHY_ID_BCM5400: return "5400";
8585 case PHY_ID_BCM5401: return "5401";
8586 case PHY_ID_BCM5411: return "5411";
8587 case PHY_ID_BCM5701: return "5701";
8588 case PHY_ID_BCM5703: return "5703";
8589 case PHY_ID_BCM5704: return "5704";
8590 case PHY_ID_BCM5705: return "5705";
8591 case PHY_ID_BCM5750: return "5750";
8592 case PHY_ID_BCM8002: return "8002/serdes";
8593 case 0: return "serdes";
8594 default: return "unknown";
8598 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8600 struct pci_dev *peer;
8601 unsigned int func, devnr = tp->pdev->devfn & ~7;
8603 for (func = 0; func < 8; func++) {
8604 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8605 if (peer && peer != tp->pdev)
8609 if (!peer || peer == tp->pdev)
8613 * We don't need to keep the refcount elevated; there's no way
8614 * to remove one half of this device without removing the other
8621 static int __devinit tg3_init_one(struct pci_dev *pdev,
8622 const struct pci_device_id *ent)
8624 static int tg3_version_printed = 0;
8625 unsigned long tg3reg_base, tg3reg_len;
8626 struct net_device *dev;
8628 int i, err, pci_using_dac, pm_cap;
8630 if (tg3_version_printed++ == 0)
8631 printk(KERN_INFO "%s", version);
8633 err = pci_enable_device(pdev);
8635 printk(KERN_ERR PFX "Cannot enable PCI device, "
8640 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8641 printk(KERN_ERR PFX "Cannot find proper PCI device "
8642 "base address, aborting.\n");
8644 goto err_out_disable_pdev;
8647 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8649 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8651 goto err_out_disable_pdev;
8654 pci_set_master(pdev);
8656 /* Find power-management capability. */
8657 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8659 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8662 goto err_out_free_res;
8665 /* Configure DMA attributes. */
8666 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8669 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8671 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8672 "for consistent allocations\n");
8673 goto err_out_free_res;
8676 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8678 printk(KERN_ERR PFX "No usable DMA configuration, "
8680 goto err_out_free_res;
8685 tg3reg_base = pci_resource_start(pdev, 0);
8686 tg3reg_len = pci_resource_len(pdev, 0);
8688 dev = alloc_etherdev(sizeof(*tp));
8690 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8692 goto err_out_free_res;
8695 SET_MODULE_OWNER(dev);
8696 SET_NETDEV_DEV(dev, &pdev->dev);
8699 dev->features |= NETIF_F_HIGHDMA;
8700 dev->features |= NETIF_F_LLTX;
8701 #if TG3_VLAN_TAG_USED
8702 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8703 dev->vlan_rx_register = tg3_vlan_rx_register;
8704 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8707 tp = netdev_priv(dev);
8710 tp->pm_cap = pm_cap;
8711 tp->mac_mode = TG3_DEF_MAC_MODE;
8712 tp->rx_mode = TG3_DEF_RX_MODE;
8713 tp->tx_mode = TG3_DEF_TX_MODE;
8714 tp->mi_mode = MAC_MI_MODE_BASE;
8716 tp->msg_enable = tg3_debug;
8718 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8720 /* The word/byte swap controls here control register access byte
8721 * swapping. DMA data byte swapping is controlled in the GRC_MODE
8724 tp->misc_host_ctrl =
8725 MISC_HOST_CTRL_MASK_PCI_INT |
8726 MISC_HOST_CTRL_WORD_SWAP |
8727 MISC_HOST_CTRL_INDIR_ACCESS |
8728 MISC_HOST_CTRL_PCISTATE_RW;
8730 /* The NONFRM (non-frame) byte/word swap controls take effect
8731 * on descriptor entries, anything which isn't packet data.
8733 * The StrongARM chips on the board (one for tx, one for rx)
8734 * are running in big-endian mode.
8736 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8737 GRC_MODE_WSWAP_NONFRM_DATA);
8739 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8741 spin_lock_init(&tp->lock);
8742 spin_lock_init(&tp->tx_lock);
8743 spin_lock_init(&tp->indirect_lock);
8744 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8746 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8747 if (tp->regs == 0UL) {
8748 printk(KERN_ERR PFX "Cannot map device registers, "
8751 goto err_out_free_dev;
8754 tg3_init_link_config(tp);
8756 tg3_init_bufmgr_config(tp);
8758 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8759 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8760 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8762 dev->open = tg3_open;
8763 dev->stop = tg3_close;
8764 dev->get_stats = tg3_get_stats;
8765 dev->set_multicast_list = tg3_set_rx_mode;
8766 dev->set_mac_address = tg3_set_mac_addr;
8767 dev->do_ioctl = tg3_ioctl;
8768 dev->tx_timeout = tg3_tx_timeout;
8769 dev->poll = tg3_poll;
8770 dev->ethtool_ops = &tg3_ethtool_ops;
8772 dev->watchdog_timeo = TG3_TX_TIMEOUT;
8773 dev->change_mtu = tg3_change_mtu;
8774 dev->irq = pdev->irq;
8775 #ifdef CONFIG_NET_POLL_CONTROLLER
8776 dev->poll_controller = tg3_poll_controller;
8779 err = tg3_get_invariants(tp);
8781 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8783 goto err_out_iounmap;
8786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8787 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8788 tp->bufmgr_config.mbuf_read_dma_low_water =
8789 DEFAULT_MB_RDMA_LOW_WATER_5705;
8790 tp->bufmgr_config.mbuf_mac_rx_low_water =
8791 DEFAULT_MB_MACRX_LOW_WATER_5705;
8792 tp->bufmgr_config.mbuf_high_water =
8793 DEFAULT_MB_HIGH_WATER_5705;
8796 #if TG3_TSO_SUPPORT != 0
8797 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8798 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8800 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8801 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8802 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8803 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8804 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8806 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8809 /* TSO is off by default, user can enable using ethtool. */
8811 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8812 dev->features |= NETIF_F_TSO;
8817 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8818 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8819 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8820 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8821 tp->rx_pending = 63;
8824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8825 tp->pdev_peer = tg3_find_5704_peer(tp);
8827 err = tg3_get_device_address(tp);
8829 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8831 goto err_out_iounmap;
8835 * Reset chip in case UNDI or EFI driver did not shutdown
8836 * DMA self test will enable WDMAC and we'll see (spurious)
8837 * pending DMA on the PCI bus at that point.
8839 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8840 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8841 pci_save_state(tp->pdev);
8842 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8846 err = tg3_test_dma(tp);
8848 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8849 goto err_out_iounmap;
8852 /* Tigon3 can do ipv4 only... and some chips have buggy
8855 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8856 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8857 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8859 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8861 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8862 dev->features &= ~NETIF_F_HIGHDMA;
8864 /* flow control autonegotiation is default behavior */
8865 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8867 err = register_netdev(dev);
8869 printk(KERN_ERR PFX "Cannot register net device, "
8871 goto err_out_iounmap;
8874 pci_set_drvdata(pdev, dev);
8876 /* Now that we have fully setup the chip, save away a snapshot
8877 * of the PCI config space. We need to restore this after
8878 * GRC_MISC_CFG core clock resets and some resume events.
8880 pci_save_state(tp->pdev);
8882 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8884 tp->board_part_number,
8885 tp->pci_chip_rev_id,
8887 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8888 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8889 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8890 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8891 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8892 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8894 for (i = 0; i < 6; i++)
8895 printk("%2.2x%c", dev->dev_addr[i],
8896 i == 5 ? '\n' : ':');
8898 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8899 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8902 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8903 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8904 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8905 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8906 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8907 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8908 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8919 pci_release_regions(pdev);
8921 err_out_disable_pdev:
8922 pci_disable_device(pdev);
8923 pci_set_drvdata(pdev, NULL);
8927 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8929 struct net_device *dev = pci_get_drvdata(pdev);
8932 struct tg3 *tp = netdev_priv(dev);
8934 unregister_netdev(dev);
8937 pci_release_regions(pdev);
8938 pci_disable_device(pdev);
8939 pci_set_drvdata(pdev, NULL);
8943 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8945 struct net_device *dev = pci_get_drvdata(pdev);
8946 struct tg3 *tp = netdev_priv(dev);
8949 if (!netif_running(dev))
8954 del_timer_sync(&tp->timer);
8956 spin_lock_irq(&tp->lock);
8957 spin_lock(&tp->tx_lock);
8958 tg3_disable_ints(tp);
8959 spin_unlock(&tp->tx_lock);
8960 spin_unlock_irq(&tp->lock);
8962 netif_device_detach(dev);
8964 spin_lock_irq(&tp->lock);
8965 spin_lock(&tp->tx_lock);
8967 spin_unlock(&tp->tx_lock);
8968 spin_unlock_irq(&tp->lock);
8970 err = tg3_set_power_state(tp, state);
8972 spin_lock_irq(&tp->lock);
8973 spin_lock(&tp->tx_lock);
8977 tp->timer.expires = jiffies + tp->timer_offset;
8978 add_timer(&tp->timer);
8980 netif_device_attach(dev);
8981 tg3_netif_start(tp);
8983 spin_unlock(&tp->tx_lock);
8984 spin_unlock_irq(&tp->lock);
8990 static int tg3_resume(struct pci_dev *pdev)
8992 struct net_device *dev = pci_get_drvdata(pdev);
8993 struct tg3 *tp = netdev_priv(dev);
8996 if (!netif_running(dev))
8999 pci_restore_state(tp->pdev);
9001 err = tg3_set_power_state(tp, 0);
9005 netif_device_attach(dev);
9007 spin_lock_irq(&tp->lock);
9008 spin_lock(&tp->tx_lock);
9012 tp->timer.expires = jiffies + tp->timer_offset;
9013 add_timer(&tp->timer);
9015 tg3_enable_ints(tp);
9017 tg3_netif_start(tp);
9019 spin_unlock(&tp->tx_lock);
9020 spin_unlock_irq(&tp->lock);
9025 static struct pci_driver tg3_driver = {
9026 .name = DRV_MODULE_NAME,
9027 .id_table = tg3_pci_tbl,
9028 .probe = tg3_init_one,
9029 .remove = __devexit_p(tg3_remove_one),
9030 .suspend = tg3_suspend,
9031 .resume = tg3_resume
9034 static int __init tg3_init(void)
9036 return pci_module_init(&tg3_driver);
9039 static void __exit tg3_cleanup(void)
9041 pci_unregister_driver(&tg3_driver);
9044 module_init(tg3_init);
9045 module_exit(tg3_cleanup);