2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
9 * Copyright (C) 2000-2003 Broadcom Corporation.
12 #include <linux/config.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/if_vlan.h>
31 #include <linux/tcp.h>
32 #include <linux/workqueue.h>
34 #include <net/checksum.h>
36 #include <asm/system.h>
38 #include <asm/byteorder.h>
39 #include <asm/uaccess.h>
42 #include <asm/idprom.h>
43 #include <asm/oplib.h>
/* Driver-wide compile-time configuration and ring-geometry constants.
 * NOTE(review): original line numbers jump throughout (e.g. 48->50, 54->56,
 * 69->79) - the #else/#endif arms of the VLAN/TSO conditionals and several
 * macro continuation lines were dropped in extraction; not compilable as
 * shown. Verify against upstream tg3.c. */
47 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48 #define TG3_VLAN_TAG_USED 1
50 #define TG3_VLAN_TAG_USED 0
54 #define TG3_TSO_SUPPORT 1
56 #define TG3_TSO_SUPPORT 0
61 #define DRV_MODULE_NAME "tg3"
62 #define PFX DRV_MODULE_NAME ": "
63 #define DRV_MODULE_VERSION "3.14"
64 #define DRV_MODULE_RELDATE "November 15, 2004"
66 #define TG3_DEF_MAC_MODE 0
67 #define TG3_DEF_RX_MODE 0
68 #define TG3_DEF_TX_MODE 0
69 #define TG3_DEF_MSG_ENABLE \
79 /* length of time before we decide the hardware is borked,
80 * and dev->tx_timeout() should be called to fix the problem
82 #define TG3_TX_TIMEOUT (5 * HZ)
84 /* hardware minimum and maximum for a single frame's data payload */
85 #define TG3_MIN_MTU 60
/* Jumbo frames (9000) only on chips that are not 5705/5750. */
86 #define TG3_MAX_MTU(tp) \
87 ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
88 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91 * You can't change the ring sizes, but you can change where you place
92 * them in the NIC onboard memory.
94 #define TG3_RX_RING_SIZE 512
95 #define TG3_DEF_RX_RING_PENDING 200
96 #define TG3_RX_JUMBO_RING_SIZE 256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
99 /* Do not place this n-ring entries value into the tp struct itself,
100 * we really want to expose these constants to GCC so that modulo et
101 * al. operations are done with shifts and masks instead of with
102 * hw multiply/modulo instructions. Another solution would be to
103 * replace things like '% foo' with '& (foo - 1)'.
/* NOTE(review): the two result arms of this ternary (orig lines 108-109)
 * were dropped in extraction. */
105 #define TG3_RX_RCB_RING_SIZE(tp) \
106 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
110 #define TG3_TX_RING_SIZE 512
111 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116 TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118 TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
/* Free-slot accounting for the (power-of-two sized) TX ring. */
121 #define TX_RING_GAP(TP) \
122 (TG3_TX_RING_SIZE - (TP)->tx_pending)
123 #define TX_BUFFS_AVAIL(TP) \
124 (((TP)->tx_cons <= (TP)->tx_prod) ? \
125 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
126 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
127 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
/* These expand a bare 'tp' - only usable where a local 'tp' is in scope. */
129 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
/* Module identity, license, and the single debug-mask parameter. */
138 static char version[] __devinitdata =
139 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_VERSION(DRV_MODULE_VERSION);
146 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
147 module_param(tg3_debug, int, 0);
148 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* FIX(review): removed a second MODULE_VERSION(DRV_MODULE_VERSION);
 * (orig line 149). MODULE_VERSION defines a symbol; emitting it twice
 * in one translation unit is a redefinition. */
/* PCI device ID match table: every Tigon3 variant this driver claims,
 * matched on vendor+device with subsystem IDs wildcarded.
 * NOTE(review): the terminating sentinel entry and closing "};" (orig
 * lines 232-233) were dropped in extraction - the table as shown is
 * unterminated; verify against upstream tg3.c. */
151 static struct pci_device_id tg3_pci_tbl[] = {
152 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
153 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
/* Exports the table so hotplug/modutils can bind devices to this module. */
235 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* ETHTOOL_GSTRINGS key table; order must match struct tg3_ethtool_stats,
 * since TG3_NUM_STATS is derived from that struct.
 * NOTE(review): the opening "static struct { ... }" line (orig 237), the
 * closing "};", and several entries (line-number gaps, e.g. 244->246,
 * 296->300) were dropped in extraction; verify against upstream tg3.c. */
238 const char string[ETH_GSTRING_LEN];
239 } ethtool_stats_keys[TG3_NUM_STATS] = {
242 { "rx_ucast_packets" },
243 { "rx_mcast_packets" },
244 { "rx_bcast_packets" },
246 { "rx_align_errors" },
247 { "rx_xon_pause_rcvd" },
248 { "rx_xoff_pause_rcvd" },
249 { "rx_mac_ctrl_rcvd" },
250 { "rx_xoff_entered" },
251 { "rx_frame_too_long_errors" },
253 { "rx_undersize_packets" },
254 { "rx_in_length_errors" },
255 { "rx_out_length_errors" },
256 { "rx_64_or_less_octet_packets" },
257 { "rx_65_to_127_octet_packets" },
258 { "rx_128_to_255_octet_packets" },
259 { "rx_256_to_511_octet_packets" },
260 { "rx_512_to_1023_octet_packets" },
261 { "rx_1024_to_1522_octet_packets" },
262 { "rx_1523_to_2047_octet_packets" },
263 { "rx_2048_to_4095_octet_packets" },
264 { "rx_4096_to_8191_octet_packets" },
265 { "rx_8192_to_9022_octet_packets" },
272 { "tx_flow_control" },
274 { "tx_single_collisions" },
275 { "tx_mult_collisions" },
277 { "tx_excessive_collisions" },
278 { "tx_late_collisions" },
279 { "tx_collide_2times" },
280 { "tx_collide_3times" },
281 { "tx_collide_4times" },
282 { "tx_collide_5times" },
283 { "tx_collide_6times" },
284 { "tx_collide_7times" },
285 { "tx_collide_8times" },
286 { "tx_collide_9times" },
287 { "tx_collide_10times" },
288 { "tx_collide_11times" },
289 { "tx_collide_12times" },
290 { "tx_collide_13times" },
291 { "tx_collide_14times" },
292 { "tx_collide_15times" },
293 { "tx_ucast_packets" },
294 { "tx_mcast_packets" },
295 { "tx_bcast_packets" },
296 { "tx_carrier_sense_errors" },
300 { "dma_writeq_full" },
301 { "dma_write_prioq_full" },
305 { "rx_threshold_hit" },
307 { "dma_readq_full" },
308 { "dma_read_prioq_full" },
309 { "tx_comp_queue_full" },
311 { "ring_set_send_prod_index" },
312 { "ring_status_update" },
314 { "nic_avoided_irqs" },
315 { "nic_tx_threshold_hit" }
/* Write a 32-bit register, going through PCI config space indirection on
 * chips with the PCIX target hardware bug, and re-reading after the write
 * on chips with the 5701 register-write bug (posted-write flush).
 * NOTE(review): braces, the flags declaration and the "} else {" between
 * the two paths (orig 319, 321-322, 327, 331+) were dropped in extraction;
 * not compilable as shown. */
318 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
320 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
/* Indirect path: serialize config-space window accesses. */
323 spin_lock_irqsave(&tp->indirect_lock, flags);
324 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
325 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
326 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Direct MMIO path. */
328 writel(val, tp->regs + off);
329 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
330 readl(tp->regs + off);
/* Like tg3_write_indirect_reg32 but always flushes the PCI write on the
 * direct path by reading the register back.
 * NOTE(review): braces, flags declaration, "} else {", and the writel
 * before the flushing readl (orig 335, 337-338, 343, 345) were dropped in
 * extraction; not compilable as shown. */
334 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
336 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
339 spin_lock_irqsave(&tp->indirect_lock, flags);
340 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
341 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
342 spin_unlock_irqrestore(&tp->indirect_lock, flags);
344 void __iomem *dest = tp->regs + off;
346 readl(dest); /* always flush PCI write */
/* Write an RX mailbox; on chips with mailbox write-reordering the value
 * must be flushed (the writel/readl lines were dropped in extraction -
 * original line numbers jump 352->354; verify against upstream tg3.c). */
350 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
352 void __iomem *mbox = tp->regs + off;
354 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Write a TX mailbox, with workarounds for the TXD mailbox hardware bug
 * and mailbox write reordering. NOTE(review): the actual writel calls and
 * flush (orig 361, 363, 365+) were dropped in extraction; not compilable
 * as shown. */
358 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
360 void __iomem *mbox = tp->regs + off;
362 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
364 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Register access shorthands. All of these expand a bare 'tp' - they are
 * only usable inside functions with a local 'struct tg3 *tp' in scope. */
368 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
369 #define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
370 #define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
372 #define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
373 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
374 #define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
375 #define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
376 #define tr32(reg) readl(tp->regs + (reg))
377 #define tr16(reg) readw(tp->regs + (reg))
378 #define tr8(reg) readb(tp->regs + (reg))
/* Write one word of NIC on-board SRAM through the config-space memory
 * window, under indirect_lock; the window base is restored to zero so
 * other code can assume it. NOTE(review): braces and the flags declaration
 * (orig 381-383, 391) were dropped in extraction. */
380 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
384 spin_lock_irqsave(&tp->indirect_lock, flags);
385 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
386 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
388 /* Always leave this as zero. */
389 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
390 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read one word of NIC on-board SRAM into *val through the config-space
 * memory window; mirror of tg3_write_mem. NOTE(review): braces and flags
 * declaration (orig 394-396, 404) were dropped in extraction. */
393 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
397 spin_lock_irqsave(&tp->indirect_lock, flags);
398 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
399 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
401 /* Always leave this as zero. */
402 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
403 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mask the PCI interrupt in MISC_HOST_CTRL, write 1 to the interrupt
 * mailbox to disable, and read it back to flush the posted write. */
406 static void tg3_disable_ints(struct tg3 *tp)
408 tw32(TG3PCI_MISC_HOST_CTRL,
409 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
410 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
411 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* If the status block says there is a pending update, force an interrupt
 * via GRC local control so the event is not lost. */
414 static inline void tg3_cond_int(struct tg3 *tp)
416 if (tp->hw_status->status & SD_STATUS_UPDATED)
417 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Unmask the PCI interrupt, write 0 to the interrupt mailbox to enable,
 * and flush with a readback. NOTE(review): the trailing tg3_cond_int()
 * call (line-number gap after 425) was dropped in extraction. */
420 static void tg3_enable_ints(struct tg3 *tp)
422 tw32(TG3PCI_MISC_HOST_CTRL,
423 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
424 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
425 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* Re-enable interrupts without flushing the mailbox write - the caller
 * tolerates the interrupt arriving late. NOTE(review): lines after orig
 * 438 (the conditional tg3_cond_int path) were dropped in extraction. */
431 * similar to tg3_enable_ints, but it can return without flushing the
432 * PIO write which reenables interrupts
434 static void tg3_restart_ints(struct tg3 *tp)
436 tw32(TG3PCI_MISC_HOST_CTRL,
437 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
438 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
/* Quiesce the stack's view of the device: stop NAPI polling, then stop
 * the transmit queue. */
444 static inline void tg3_netif_stop(struct tg3 *tp)
446 netif_poll_disable(tp->dev);
447 netif_tx_disable(tp->dev);
/* Inverse of tg3_netif_stop: wake the TX queue and re-enable polling.
 * NOTE(review): the trailing tg3_cond_int() (line-number gap after 457)
 * was dropped in extraction. */
450 static inline void tg3_netif_start(struct tg3 *tp)
452 netif_wake_queue(tp->dev);
453 /* NOTE: unconditional netif_wake_queue is only appropriate
454 * so long as all callers are assured to have free tx slots
455 * (such as after tg3_init_hw)
457 netif_poll_enable(tp->dev);
/* Switch the core clock to the alternate clock in two steps, preserving
 * the CLKRUN bits, and cache the result in tp->pci_clock_ctrl. 5705/5750
 * only need the step when running from the 625 MHz core clock.
 * NOTE(review): braces, the orig_clock_ctrl declaration, udelay()s and
 * closing lines (orig 462, 464-465, 469, 477-478, 481, 483, 486-487,
 * 489-490) were dropped in extraction; not compilable as shown. */
461 static void tg3_switch_clocks(struct tg3 *tp)
463 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
466 orig_clock_ctrl = clock_ctrl;
467 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
468 CLOCK_CTRL_CLKRUN_OENABLE |
470 tp->pci_clock_ctrl = clock_ctrl;
472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
474 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
475 tw32_f(TG3PCI_CLOCK_CTRL,
476 clock_ctrl | CLOCK_CTRL_625_CORE);
479 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
480 tw32_f(TG3PCI_CLOCK_CTRL,
482 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
484 tw32_f(TG3PCI_CLOCK_CTRL,
485 clock_ctrl | (CLOCK_CTRL_ALTCLK))<br/>;
488 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
/* Bound on MI-interface busy polling. */
492 #define PHY_BUSY_LOOPS 5000
/* Read PHY register 'reg' over the MDIO (MI) interface into *val.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards. Polls MI_COM_BUSY up to PHY_BUSY_LOOPS times.
 * NOTE(review): braces, local declarations, udelay()s, the ret/loops
 * bookkeeping and the return (orig 495-498, 500, 502-506, 512, 514, 517,
 * 519, 521, 523-528, 530-532, 535-539) were dropped in extraction; not
 * compilable as shown. */
494 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
499 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
501 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
507 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
508 MI_COM_PHY_ADDR_MASK);
509 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
510 MI_COM_REG_ADDR_MASK);
511 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
513 tw32_f(MAC_MI_COM, frame_val);
515 loops = PHY_BUSY_LOOPS;
516 while (loops-- > 0) {
518 frame_val = tr32(MAC_MI_COM);
520 if ((frame_val & MI_COM_BUSY) == 0) {
522 frame_val = tr32(MAC_MI_COM);
529 *val = frame_val & MI_COM_DATA_MASK;
533 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write 'val' to PHY register 'reg' over the MDIO (MI) interface; same
 * auto-poll suspend/restore and busy-poll structure as tg3_readphy.
 * NOTE(review): braces, local declarations, udelay()s and the return path
 * (orig 542-545, 547, 549-551, 558, 560, 563, 566, 568-575, 578-582) were
 * dropped in extraction; not compilable as shown. */
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
546 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
552 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
553 MI_COM_PHY_ADDR_MASK);
554 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
555 MI_COM_REG_ADDR_MASK);
556 frame_val |= (val & MI_COM_DATA_MASK);
557 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559 tw32_f(MAC_MI_COM, frame_val);
561 loops = PHY_BUSY_LOOPS;
562 while (loops-- > 0) {
564 frame_val = tr32(MAC_MI_COM);
565 if ((frame_val & MI_COM_BUSY) == 0) {
567 frame_val = tr32(MAC_MI_COM);
576 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
577 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable "ethernet wirespeed" (bits 15 and 4 of the shadow register read
 * back via AUX_CTRL 0x7007) unless the chip flags forbid it.
 * NOTE(review): braces and the u32 val declaration (orig 585-587, 589-590,
 * 594) were dropped in extraction. */
584 static void tg3_phy_set_wirespeed(struct tg3 *tp)
588 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
592 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
593 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
596 static int tg3_bmcr_reset(struct tg3 *tp)
601 /* OK, reset it, and poll the BMCR_RESET bit until it
602 * clears or we time out.
604 phy_control = BMCR_RESET;
605 err = tg3_writephy(tp, MII_BMCR, phy_control);
611 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until bit 12 clears (DSP macro complete).
 * NOTE(review): the retry loop, declarations and returns (orig 628-633,
 * 636-642) were dropped in extraction; not compilable as shown. */
627 static int tg3_wait_macro_done(struct tg3 *tp)
634 tg3_readphy(tp, 0x16, &tmp32);
635 if ((tmp32 & 0x1000) == 0)
/* Write a fixed DSP test pattern into all four PHY channels, then read it
 * back and verify. On mismatch, requests another PHY reset via *resetp.
 * NOTE(review): many scaffolding lines (declarations, braces, error
 * returns - orig 645, 651-653, 655-656, 660, 663-664, 667-670, 675-678,
 * 681-684, 686-687, 691-695, 701-708) were dropped in extraction; not
 * compilable as shown. */
644 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
646 static const u32 test_pat[4][6] = {
647 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
648 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
649 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
650 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
654 for (chan = 0; chan < 4; chan++) {
/* Select channel, write-enable the DSP, write the 6-word pattern. */
657 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
658 (chan * 0x2000) | 0x0200);
659 tg3_writephy(tp, 0x16, 0x0002);
661 for (i = 0; i < 6; i++)
662 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
665 tg3_writephy(tp, 0x16, 0x0202);
666 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and switch to readback mode. */
671 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
672 (chan * 0x2000) | 0x0200);
673 tg3_writephy(tp, 0x16, 0x0082);
674 if (tg3_wait_macro_done(tp)) {
679 tg3_writephy(tp, 0x16, 0x0802);
680 if (tg3_wait_macro_done(tp)) {
/* Pattern words come back low/high pairs; compare both halves. */
685 for (i = 0; i < 6; i += 2) {
688 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
689 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
690 if (tg3_wait_macro_done(tp)) {
696 if (low != test_pat[chan][i] ||
697 high != test_pat[chan][i+1]) {
698 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
699 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
700 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out the DSP test pattern in all four PHY channels (undo of
 * tg3_phy_write_and_check_testpat). NOTE(review): declarations, braces
 * and return (orig 711-713, 715-716, 724-728) were dropped in extraction;
 * not compilable as shown. */
710 static int tg3_phy_reset_chanpat(struct tg3 *tp)
714 for (chan = 0; chan < 4; chan++) {
717 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
718 (chan * 0x2000) | 0x0200);
719 tg3_writephy(tp, 0x16, 0x0002);
720 for (i = 0; i < 6; i++)
721 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
722 tg3_writephy(tp, 0x16, 0x0202);
723 if (tg3_wait_macro_done(tp))
/* Extended PHY reset workaround for 5703/5704/5705-class chips: reset,
 * force master 1000/full, run the DSP test-pattern check with retries,
 * then restore the original MII_TG3_CTRL and EXT_CTRL state.
 * NOTE(review): the retry loop, braces and error returns (orig 731,
 * 734-738, 740-744, 747, 749, 753, 759, 762, 766, 768-771, 773-775, 778,
 * 781, 786-787, 789-790, 792, 794, 796-798) were dropped in extraction;
 * not compilable as shown.
 * FIX(review): "&reg32" was mangled to "\u00aeg32" by HTML-entity
 * corruption ("&reg;" -> registered-trademark sign) on orig lines 746 and
 * 793; restore "&reg32" when repairing this file. */
730 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
732 u32 reg32, phy9_orig;
733 int retries, do_phy_reset, err;
739 err = tg3_bmcr_reset(tp);
745 /* Disable transmitter and interrupt. */
746 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
748 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
750 /* Set full-duplex, 1000 mbps. */
751 tg3_writephy(tp, MII_BMCR,
752 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
754 /* Set to master mode. */
755 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
756 tg3_writephy(tp, MII_TG3_CTRL,
757 (MII_TG3_CTRL_AS_MASTER |
758 MII_TG3_CTRL_ENABLE_AS_MASTER));
760 /* Enable SM_DSP_CLOCK and 6dB. */
761 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
763 /* Block the PHY control access. */
764 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
765 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
767 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
772 err = tg3_phy_reset_chanpat(tp);
776 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
777 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
779 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
780 tg3_writephy(tp, 0x16, 0x0000);
782 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
784 /* Set Extended packet length bit for jumbo frames */
785 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
788 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
791 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
793 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
795 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Main PHY reset entry point: read-clear BMSR twice, pick the chip-
 * specific reset path (5703/4/5 workaround vs. plain BMCR reset), then
 * apply per-PHY errata (ADC bug, 5704 A0 bug, BER bug), set the extended
 * packet length bit, and finally enable wirespeed.
 * NOTE(review): braces, declarations, error returns and `out:` label
 * (orig 802, 804-807, 810-812, 817-821, 823-826, 834, 838, 848, 856-857,
 * 862, 864-865) were dropped in extraction; not compilable as shown. */
800 /* This will reset the tigon3 PHY if there is no valid
801 * link unless the FORCE argument is non-zero.
803 static int tg3_phy_reset(struct tg3 *tp)
/* BMSR latches link-down; read twice to get the current state. */
808 err = tg3_readphy(tp, MII_BMSR, &phy_status);
809 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
816 err = tg3_phy_reset_5703_4_5(tp);
822 err = tg3_bmcr_reset(tp);
827 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
828 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
829 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
830 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
831 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
832 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
833 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
835 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
836 tg3_writephy(tp, 0x1c, 0x8d68);
837 tg3_writephy(tp, 0x1c, 0x8d68);
839 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
840 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
841 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
843 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
844 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
845 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
846 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
847 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
849 /* Set Extended packet length bit (bit 14) on all chips that */
850 /* support jumbo frames */
851 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
852 /* Cannot do read-modify-write on 5401 */
853 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
854 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
855 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
858 /* Set bit 14 with read-modify-write to preserve other bits */
859 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
860 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
861 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
863 tg3_phy_set_wirespeed(tp);
/* Configure GPIO-controlled auxiliary power. On 5704 the two ports share
 * aux power, so the peer device's WOL/INIT_COMPLETE flags are consulted
 * via pdev_peer before switching GPIOs.
 * NOTE(review): many control-flow lines are missing (braces, else arms,
 * udelay()s, intermediate tw32_f steps - orig 868, 870, 872-873, 876-880,
 * 891-896, 898-899, 903, 909, 912, 914-916, 923, 926, 928-930, 936,
 * 938-942, 945, 947-948, 952-953, 956-957, 961-964); not compilable as
 * shown. The pairing of the tw32_f calls with their GPIO sequencing
 * delays cannot be reconstructed from this text - verify against
 * upstream tg3.c. */
867 static void tg3_frob_aux_power(struct tg3 *tp)
869 struct tg3 *tp_peer = tp;
871 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
875 tp_peer = pci_get_drvdata(tp->pdev_peer);
881 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
882 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
885 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
886 (GRC_LCLCTRL_GPIO_OE0 |
887 GRC_LCLCTRL_GPIO_OE1 |
888 GRC_LCLCTRL_GPIO_OE2 |
889 GRC_LCLCTRL_GPIO_OUTPUT0 |
890 GRC_LCLCTRL_GPIO_OUTPUT1));
897 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
900 /* On 5753 and variants, GPIO2 cannot be used. */
901 no_gpio2 = (tp->nic_sram_data_cfg &
902 NIC_SRAM_DATA_CFG_NO_GPIO2) != 0;
904 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
905 GRC_LCLCTRL_GPIO_OE1 |
906 GRC_LCLCTRL_GPIO_OE2 |
907 GRC_LCLCTRL_GPIO_OUTPUT1 |
908 GRC_LCLCTRL_GPIO_OUTPUT2;
910 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
911 GRC_LCLCTRL_GPIO_OUTPUT2);
913 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
917 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
918 GRC_LCLCTRL_GPIO_OE1 |
919 GRC_LCLCTRL_GPIO_OE2 |
920 GRC_LCLCTRL_GPIO_OUTPUT0 |
921 GRC_LCLCTRL_GPIO_OUTPUT1 |
922 GRC_LCLCTRL_GPIO_OUTPUT2;
924 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
925 GRC_LCLCTRL_GPIO_OUTPUT2);
927 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
931 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
932 GRC_LCLCTRL_GPIO_OE1 |
933 GRC_LCLCTRL_GPIO_OE2 |
934 GRC_LCLCTRL_GPIO_OUTPUT0 |
935 GRC_LCLCTRL_GPIO_OUTPUT1;
937 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
943 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
944 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
946 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
949 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
950 (GRC_LCLCTRL_GPIO_OE1 |
951 GRC_LCLCTRL_GPIO_OUTPUT1));
954 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
955 (GRC_LCLCTRL_GPIO_OE1));
958 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959 (GRC_LCLCTRL_GPIO_OE1 |
960 GRC_LCLCTRL_GPIO_OUTPUT1));
/* Forward declarations for routines defined later in the file, and the
 * reset-kind codes passed to tg3_write_sig_post_reset(). */
966 static int tg3_setup_phy(struct tg3 *, int);
968 #define RESET_KIND_SHUTDOWN 0
969 #define RESET_KIND_INIT 1
970 #define RESET_KIND_SUSPEND 2
972 static void tg3_write_sig_post_reset(struct tg3 *, int);
/* Transition the device into the requested PCI power state: force the PHY
 * to low power, arm WOL magic-packet mode if enabled, gate the RX/TX core
 * clocks per chip family, hand off aux power, and finally write PM_CTRL.
 * NOTE(review): many scaffolding lines are missing (declarations, braces,
 * the switch on 'state', udelay()s, else arms - orig 975-976, 978-979,
 * 982, 985-986, 988-989, 992-994, 996-997, 999-1015, 1019-1021, 1023,
 * 1027, 1033-1034, 1040-1041, 1043, 1045-1046, 1049-1050, 1052, 1056,
 * 1058-1059, 1062, 1066, 1068-1069, 1071-1073, 1077-1078, 1082, 1084,
 * 1086, 1090, 1095, 1101, 1104-1105, 1107-1108, 1110-1111, 1114-1115,
 * 1121, 1123-1124, 1127-1130, 1132, 1135, 1137-1139); not compilable as
 * shown - verify against upstream tg3.c. */
974 static int tg3_set_power_state(struct tg3 *tp, int state)
977 u16 power_control, power_caps;
980 /* Make sure register accesses (indirect or otherwise)
981 * will function correctly.
983 pci_write_config_dword(tp->pdev,
984 TG3PCI_MISC_HOST_CTRL,
987 pci_read_config_word(tp->pdev,
990 power_control |= PCI_PM_CTRL_PME_STATUS;
991 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
995 pci_write_config_word(tp->pdev,
998 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1016 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1018 tp->dev->name, state);
1022 power_control |= PCI_PM_CTRL_PME_ENABLE;
1024 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1025 tw32(TG3PCI_MISC_HOST_CTRL,
1026 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1028 if (tp->link_config.phy_is_low_power == 0) {
1029 tp->link_config.phy_is_low_power = 1;
/* Remember the configured link so resume can restore it. */
1030 tp->link_config.orig_speed = tp->link_config.speed;
1031 tp->link_config.orig_duplex = tp->link_config.duplex;
1032 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1035 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1036 tp->link_config.speed = SPEED_10;
1037 tp->link_config.duplex = DUPLEX_HALF;
1038 tp->link_config.autoneg = AUTONEG_ENABLE;
1039 tg3_setup_phy(tp, 0);
1042 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1044 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1047 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1048 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1051 mac_mode = MAC_MODE_PORT_MODE_MII;
1053 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1054 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1055 mac_mode |= MAC_MODE_LINK_POLARITY;
1057 mac_mode = MAC_MODE_PORT_MODE_TBI;
1060 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1061 tw32(MAC_LED_CTRL, tp->led_ctrl);
1063 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1064 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1065 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1067 tw32_f(MAC_MODE, mac_mode);
1070 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1074 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1075 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1079 base_val = tp->pci_clock_ctrl;
1080 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1081 CLOCK_CTRL_TXCLK_DISABLE);
1083 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1085 CLOCK_CTRL_PWRDOWN_PLL133);
/* FIX(review): was "== 5750" - comparing GET_ASIC_REV() against the
 * decimal literal 5750 instead of the ASIC_REV_5750 code can never
 * match, so the ASF clock-gating exemption was never taken. */
1087 } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1088 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1089 u32 newbits1, newbits2;
1091 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1093 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1094 CLOCK_CTRL_TXCLK_DISABLE |
1096 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1097 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1099 newbits1 = CLOCK_CTRL_625_CORE;
1100 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1102 newbits1 = CLOCK_CTRL_ALTCLK;
1103 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1106 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1109 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1112 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1113 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1118 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1119 CLOCK_CTRL_TXCLK_DISABLE |
1120 CLOCK_CTRL_44MHZ_CORE);
1122 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1125 tw32_f(TG3PCI_CLOCK_CTRL,
1126 tp->pci_clock_ctrl | newbits3);
1131 tg3_frob_aux_power(tp);
1133 /* Finally, set the new power state. */
1134 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1136 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Log link state: "down", or speed/duplex plus TX/RX flow-control status.
 * NOTE(review): braces and printk continuation lines (orig 1142, 1145,
 * 1147, 1149, 1151, 1153-1154, 1156-1157, 1160-1161) were dropped in
 * extraction; not compilable as shown. */
1141 static void tg3_link_report(struct tg3 *tp)
1143 if (!netif_carrier_ok(tp->dev)) {
1144 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1146 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1148 (tp->link_config.active_speed == SPEED_1000 ?
1150 (tp->link_config.active_speed == SPEED_100 ?
1152 (tp->link_config.active_duplex == DUPLEX_FULL ?
1155 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1158 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1159 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve IEEE 802.3 pause autonegotiation (local vs. link-partner
 * advertisement) into TG3_FLAG_RX_PAUSE/TX_PAUSE, then program the MAC
 * RX/TX mode registers only when the resolved mode actually changed.
 * NOTE(review): several continuation/brace lines (orig 1168, 1173, 1175,
 * 1177, 1179, 1181, 1183-1184, 1189-1190, 1193, 1195-1196, 1199, 1201,
 * 1204-1205, 1208, 1210, 1213-1214) were dropped in extraction; not
 * compilable as shown. */
1163 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1165 u32 new_tg3_flags = 0;
1166 u32 old_rx_mode = tp->rx_mode;
1167 u32 old_tx_mode = tp->tx_mode;
1169 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1170 if (local_adv & ADVERTISE_PAUSE_CAP) {
1171 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1172 if (remote_adv & LPA_PAUSE_CAP)
1174 (TG3_FLAG_RX_PAUSE |
1176 else if (remote_adv & LPA_PAUSE_ASYM)
1178 (TG3_FLAG_RX_PAUSE);
1180 if (remote_adv & LPA_PAUSE_CAP)
1182 (TG3_FLAG_RX_PAUSE |
1185 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1186 if ((remote_adv & LPA_PAUSE_CAP) &&
1187 (remote_adv & LPA_PAUSE_ASYM))
1188 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1191 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1192 tp->tg3_flags |= new_tg3_flags;
/* Autoneg of pause disabled: keep whatever is forced in tg3_flags. */
1194 new_tg3_flags = tp->tg3_flags;
1197 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1198 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1200 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1202 if (old_rx_mode != tp->rx_mode) {
1203 tw32_f(MAC_RX_MODE, tp->rx_mode);
1206 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1207 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1209 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1211 if (old_tx_mode != tp->tx_mode) {
1212 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Decode the PHY AUX status speed/duplex field into SPEED_*/DUPLEX_*
 * output parameters; unknown codes yield SPEED_INVALID/DUPLEX_INVALID.
 * NOTE(review): the *speed assignments for the 10/100 cases, break
 * statements and braces (orig 1217, 1220, 1222-1223, 1225, 1227-1228,
 * 1230, 1232-1233, 1235, 1237-1238, 1242-1243, 1247-1249, 1252-1254) were
 * dropped in extraction; not compilable as shown. */
1216 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1218 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1219 case MII_TG3_AUX_STAT_10HALF:
1221 *duplex = DUPLEX_HALF;
1224 case MII_TG3_AUX_STAT_10FULL:
1226 *duplex = DUPLEX_FULL;
1229 case MII_TG3_AUX_STAT_100HALF:
1231 *duplex = DUPLEX_HALF;
1234 case MII_TG3_AUX_STAT_100FULL:
1236 *duplex = DUPLEX_FULL;
1239 case MII_TG3_AUX_STAT_1000HALF:
1240 *speed = SPEED_1000;
1241 *duplex = DUPLEX_HALF;
1244 case MII_TG3_AUX_STAT_1000FULL:
1245 *speed = SPEED_1000;
1246 *duplex = DUPLEX_FULL;
1250 *speed = SPEED_INVALID;
1251 *duplex = DUPLEX_INVALID;
/* tg3_phy_copper_begin() - program the copper PHY's advertisement and
 * BMCR registers according to tp->link_config, then (re)start the link.
 *
 * Three configuration paths are visible below:
 *   1. Low-power (WOL) mode: gigabit is disabled; only 10baseT (plus
 *      100baseT if TG3_FLAG_WOL_SPEED_100MB) is advertised.
 *   2. Autoneg with SPEED_INVALID: advertise everything the chip
 *      supports (gigabit masked off for 10/100-only parts).
 *   3. A specific forced speed/duplex requested by the user.
 *
 * NOTE(review): excerpt is line-sampled; declarations of new_adv/i,
 * several else/brace lines and some case bodies are elided from view.
 */
1256 static int tg3_phy_copper_begin(struct tg3 *tp)
1261 if (tp->link_config.phy_is_low_power) {
1262 /* Entering low power mode. Disable gigabit and
1263 * 100baseT advertisements.
1265 tg3_writephy(tp, MII_TG3_CTRL, 0);
1267 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1268 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1269 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1270 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1272 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1273 } else if (tp->link_config.speed == SPEED_INVALID) {
/* No specific speed requested: advertise the full supported set. */
1274 tp->link_config.advertising =
1275 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1276 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1277 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1278 ADVERTISED_Autoneg | ADVERTISED_MII);
1280 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1281 tp->link_config.advertising &=
1282 ~(ADVERTISED_1000baseT_Half |
1283 ADVERTISED_1000baseT_Full);
1285 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1286 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1287 new_adv |= ADVERTISE_10HALF;
1288 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1289 new_adv |= ADVERTISE_10FULL;
1290 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1291 new_adv |= ADVERTISE_100HALF;
1292 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1293 new_adv |= ADVERTISE_100FULL;
1294 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1296 if (tp->link_config.advertising &
1297 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1299 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1300 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1301 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1302 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 silicon wants to be forced as the 1000T master. */
1303 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1304 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1305 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1306 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1307 MII_TG3_CTRL_ENABLE_AS_MASTER);
1308 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1310 tg3_writephy(tp, MII_TG3_CTRL, 0);
1313 /* Asking for a specific link mode. */
1314 if (tp->link_config.speed == SPEED_1000) {
1315 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1316 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1318 if (tp->link_config.duplex == DUPLEX_FULL)
1319 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1321 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1322 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1323 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1324 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1325 MII_TG3_CTRL_ENABLE_AS_MASTER);
1326 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1328 tg3_writephy(tp, MII_TG3_CTRL, 0);
1330 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1331 if (tp->link_config.speed == SPEED_100) {
1332 if (tp->link_config.duplex == DUPLEX_FULL)
1333 new_adv |= ADVERTISE_100FULL;
1335 new_adv |= ADVERTISE_100HALF;
1337 if (tp->link_config.duplex == DUPLEX_FULL)
1338 new_adv |= ADVERTISE_10FULL;
1340 new_adv |= ADVERTISE_10HALF;
1342 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Forced mode: write BMCR directly instead of autonegotiating.  The
 * loopback write plus BMSR polling below waits for the old link to
 * drop before applying the new forced BMCR value.
 */
1346 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1347 tp->link_config.speed != SPEED_INVALID) {
1348 u32 bmcr, orig_bmcr;
1350 tp->link_config.active_speed = tp->link_config.speed;
1351 tp->link_config.active_duplex = tp->link_config.duplex;
1354 switch (tp->link_config.speed) {
1360 bmcr |= BMCR_SPEED100;
1364 bmcr |= TG3_BMCR_SPEED1000;
1368 if (tp->link_config.duplex == DUPLEX_FULL)
1369 bmcr |= BMCR_FULLDPLX;
1371 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1372 if (bmcr != orig_bmcr) {
1373 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1374 for (i = 0; i < 1500; i++) {
/* Double-read: BMSR link status is latched-low per the MII spec. */
1378 tg3_readphy(tp, MII_BMSR, &tmp);
1379 tg3_readphy(tp, MII_BMSR, &tmp);
1380 if (!(tmp & BMSR_LSTATUS)) {
1385 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg path: kick off (or restart) negotiation. */
1389 tg3_writephy(tp, MII_BMCR,
1390 BMCR_ANENABLE | BMCR_ANRESTART);
/* tg3_init_5401phy_dsp() - program BCM5401 PHY DSP workaround values.
 *
 * Writes a fixed sequence of vendor DSP coefficients through the
 * DSP address/data register pair (MII_TG3_DSP_ADDRESS /
 * MII_TG3_DSP_RW_PORT), after disabling tap power management and
 * setting the extended-packet-length bit via MII_TG3_AUX_CTRL.
 * Errors from the individual writes are OR-ed together and the
 * combined status is returned (0 on full success).
 *
 * The magic register/value pairs come from Broadcom; they have no
 * documented meaning beyond "required for stable 5401 operation".
 */
1396 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1400 /* Turn off tap power management. */
1401 /* Set Extended packet length bit */
1402 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1404 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1405 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1407 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1408 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1410 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1411 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1413 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1414 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1416 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1417 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* tg3_copper_is_advertising_all() - check that the PHY is currently
 * advertising every 10/100 mode (and both 1000T modes unless the part
 * is 10/100-only).
 *
 * Used by the link-setup path to decide whether autoneg must be
 * restarted, e.g. after exiting a restricted (low-power) advertisement.
 * NOTE(review): excerpt is line-sampled; the return statements and the
 * tg3_ctrl declaration are elided from view.
 */
1424 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1426 u32 adv_reg, all_mask;
1428 tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1429 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1430 ADVERTISE_100HALF | ADVERTISE_100FULL);
1431 if ((adv_reg & all_mask) != all_mask)
1433 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
/* Gigabit-capable part: both 1000T half and full must be advertised. */
1436 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1437 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1438 MII_TG3_CTRL_ADV_1000_FULL);
1439 if ((tg3_ctrl & all_mask) != all_mask)
/* tg3_setup_copper_phy() - bring up / re-evaluate the copper PHY link.
 *
 * Applies chip-revision-specific PHY workarounds, polls BMSR/AUX_STAT
 * for link and resolved speed/duplex, validates pause advertisement,
 * and programs MAC_MODE (port mode, duplex, polarity) to match.
 * Finally synchronizes netif_carrier state and reports link changes.
 *
 * NOTE(review): excerpt is line-sampled; local declarations (bmsr,
 * bmcr, i, err, current_speed/duplex, dummy, tmp, val, aux_stat),
 * delays, several else branches and closing braces are elided.
 */
1445 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1447 int current_link_up;
/* Clear latched MAC status change bits before reconfiguring. */
1456 (MAC_STATUS_SYNC_CHANGED |
1457 MAC_STATUS_CFG_CHANGED |
1458 MAC_STATUS_MI_COMPLETION |
1459 MAC_STATUS_LNKSTATE_CHANGED));
1462 tp->mi_mode = MAC_MI_MODE_BASE;
1463 tw32_f(MAC_MI_MODE, tp->mi_mode);
1466 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1468 /* Some third-party PHYs need to be reset on link going
1471 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1474 netif_carrier_ok(tp->dev)) {
/* Double-read: BMSR link status is latched-low per the MII spec. */
1475 tg3_readphy(tp, MII_BMSR, &bmsr);
1476 tg3_readphy(tp, MII_BMSR, &bmsr);
1477 if (!(bmsr & BMSR_LSTATUS))
/* BCM5401: (re)load the DSP workaround table when link is down. */
1483 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1484 tg3_readphy(tp, MII_BMSR, &bmsr);
1485 tg3_readphy(tp, MII_BMSR, &bmsr);
1487 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1490 if (!(bmsr & BMSR_LSTATUS)) {
1491 err = tg3_init_5401phy_dsp(tp);
1495 tg3_readphy(tp, MII_BMSR, &bmsr);
1496 for (i = 0; i < 1000; i++) {
1498 tg3_readphy(tp, MII_BMSR, &bmsr);
1499 if (bmsr & BMSR_LSTATUS) {
/* 5401 B0 at gigabit that lost link: reset PHY and redo DSP setup. */
1505 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1506 !(bmsr & BMSR_LSTATUS) &&
1507 tp->link_config.active_speed == SPEED_1000) {
1508 err = tg3_phy_reset(tp);
1510 err = tg3_init_5401phy_dsp(tp);
1515 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1516 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1517 /* 5701 {A0,B0} CRC bug workaround */
1518 tg3_writephy(tp, 0x15, 0x0a75);
1519 tg3_writephy(tp, 0x1c, 0x8c68);
1520 tg3_writephy(tp, 0x1c, 0x8d68);
1521 tg3_writephy(tp, 0x1c, 0x8c68);
1524 /* Clear pending interrupts... */
1525 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1526 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1528 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1529 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1531 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1535 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1536 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1537 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1539 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1542 current_link_up = 0;
1543 current_speed = SPEED_INVALID;
1544 current_duplex = DUPLEX_INVALID;
1546 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1549 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1550 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1551 if (!(val & (1 << 10))) {
1553 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll (bounded) for link, then for a valid AUX_STAT speed report. */
1559 for (i = 0; i < 100; i++) {
1560 tg3_readphy(tp, MII_BMSR, &bmsr);
1561 tg3_readphy(tp, MII_BMSR, &bmsr);
1562 if (bmsr & BMSR_LSTATUS)
1567 if (bmsr & BMSR_LSTATUS) {
1570 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1571 for (i = 0; i < 2000; i++) {
1573 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1578 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Re-read BMCR until it returns a sane (non-0, non-0x7fff) value. */
1583 for (i = 0; i < 200; i++) {
1584 tg3_readphy(tp, MII_BMCR, &bmcr);
1585 tg3_readphy(tp, MII_BMCR, &bmcr);
1586 if (bmcr && bmcr != 0x7fff)
1591 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1592 if (bmcr & BMCR_ANENABLE) {
1593 current_link_up = 1;
1595 /* Force autoneg restart if we are exiting
1598 if (!tg3_copper_is_advertising_all(tp))
1599 current_link_up = 0;
1601 current_link_up = 0;
/* Forced mode: link only counts if it matches the requested mode. */
1604 if (!(bmcr & BMCR_ANENABLE) &&
1605 tp->link_config.speed == current_speed &&
1606 tp->link_config.duplex == current_duplex) {
1607 current_link_up = 1;
1609 current_link_up = 0;
1613 tp->link_config.active_speed = current_speed;
1614 tp->link_config.active_duplex = current_duplex;
/* Resolve flow control from the local/partner pause advertisements. */
1617 if (current_link_up == 1 &&
1618 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1619 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1620 u32 local_adv, remote_adv;
1622 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1623 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1625 tg3_readphy(tp, MII_LPA, &remote_adv);
1626 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1628 /* If we are not advertising full pause capability,
1629 * something is wrong. Bring the link down and reconfigure.
1631 if (local_adv != ADVERTISE_PAUSE_CAP) {
1632 current_link_up = 0;
1634 tg3_setup_flow_control(tp, local_adv, remote_adv);
/* No usable link: reprogram the PHY and take one more look at BMSR. */
1638 if (current_link_up == 0) {
1641 tg3_phy_copper_begin(tp);
1643 tg3_readphy(tp, MII_BMSR, &tmp);
1644 tg3_readphy(tp, MII_BMSR, &tmp);
1645 if (tmp & BMSR_LSTATUS)
1646 current_link_up = 1;
/* Program MAC port mode/duplex/polarity to match the resolved link. */
1649 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1650 if (current_link_up == 1) {
1651 if (tp->link_config.active_speed == SPEED_100 ||
1652 tp->link_config.active_speed == SPEED_10)
1653 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1655 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1657 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1659 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1660 if (tp->link_config.active_duplex == DUPLEX_HALF)
1661 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1663 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1665 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1666 (current_link_up == 1 &&
1667 tp->link_config.active_speed == SPEED_10))
1668 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1670 if (current_link_up == 1)
1671 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1674 /* ??? Without this setting Netgear GA302T PHY does not
1675 * ??? send/receive packets...
1677 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1678 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1679 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1680 tw32_f(MAC_MI_MODE, tp->mi_mode);
1684 tw32_f(MAC_MODE, tp->mac_mode);
1687 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1688 /* Polled via timer. */
1689 tw32_f(MAC_EVENT, 0);
1691 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X/high-speed PCI: notify firmware via SRAM
 * mailbox (details of the write call are elided from this excerpt).
 */
1695 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1696 current_link_up == 1 &&
1697 tp->link_config.active_speed == SPEED_1000 &&
1698 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1699 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1702 (MAC_STATUS_SYNC_CHANGED |
1703 MAC_STATUS_CFG_CHANGED));
1706 NIC_SRAM_FIRMWARE_MBOX,
1707 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
/* Propagate the final link state to the network stack and log it. */
1710 if (current_link_up != netif_carrier_ok(tp->dev)) {
1711 if (current_link_up)
1712 netif_carrier_on(tp->dev);
1714 netif_carrier_off(tp->dev);
1715 tg3_link_report(tp);
/* State tracked by the software fiber (TBI) auto-negotiation state
 * machine (tg3_fiber_aneg_smachine).  The ANEG_STATE_* values are the
 * machine's states; the MR_* bits live in 'flags' and mirror the
 * management-register bits of the IEEE 802.3 Clause 37 autoneg model
 * (link partner abilities, page-received, completion, etc.).
 */
1721 struct tg3_fiber_aneginfo {
1723 #define ANEG_STATE_UNKNOWN 0
1724 #define ANEG_STATE_AN_ENABLE 1
1725 #define ANEG_STATE_RESTART_INIT 2
1726 #define ANEG_STATE_RESTART 3
1727 #define ANEG_STATE_DISABLE_LINK_OK 4
1728 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1729 #define ANEG_STATE_ABILITY_DETECT 6
1730 #define ANEG_STATE_ACK_DETECT_INIT 7
1731 #define ANEG_STATE_ACK_DETECT 8
1732 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1733 #define ANEG_STATE_COMPLETE_ACK 10
1734 #define ANEG_STATE_IDLE_DETECT_INIT 11
1735 #define ANEG_STATE_IDLE_DETECT 12
1736 #define ANEG_STATE_LINK_OK 13
1737 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1738 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Bits for the 'flags' word below. */
1741 #define MR_AN_ENABLE 0x00000001
1742 #define MR_RESTART_AN 0x00000002
1743 #define MR_AN_COMPLETE 0x00000004
1744 #define MR_PAGE_RX 0x00000008
1745 #define MR_NP_LOADED 0x00000010
1746 #define MR_TOGGLE_TX 0x00000020
1747 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1748 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1749 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1750 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1751 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1752 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1753 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1754 #define MR_TOGGLE_RX 0x00002000
1755 #define MR_NP_RX 0x00004000
1757 #define MR_LINK_OK 0x80000000
/* Timestamps (in state-machine ticks) used for the settle timeouts. */
1759 unsigned long link_time, cur_time;
/* Last received config word plus a debounce counter/flags used to
 * decide when the partner's advertised ability is stable.
 */
1761 u32 ability_match_cfg;
1762 int ability_match_count;
1764 char ability_match, idle_match, ack_match;
/* Raw TX/RX config words; ANEG_CFG_* name the fields within them. */
1766 u32 txconfig, rxconfig;
1767 #define ANEG_CFG_NP 0x00000080
1768 #define ANEG_CFG_ACK 0x00000040
1769 #define ANEG_CFG_RF2 0x00000020
1770 #define ANEG_CFG_RF1 0x00000010
1771 #define ANEG_CFG_PS2 0x00000001
1772 #define ANEG_CFG_PS1 0x00008000
1773 #define ANEG_CFG_HD 0x00004000
1774 #define ANEG_CFG_FD 0x00002000
1775 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes of tg3_fiber_aneg_smachine (ANEG_DONE/ANEG_OK are
 * elided from this line-sampled excerpt).
 */
1780 #define ANEG_TIMER_ENAB 2
1781 #define ANEG_FAILED -1
1783 #define ANEG_STATE_SETTLE_TIME 10000
/* tg3_fiber_aneg_smachine() - one step of the software 802.3 Clause 37
 * style auto-negotiation state machine for fiber (TBI) links.
 *
 * Called repeatedly (see fiber_autoneg); samples MAC_STATUS /
 * MAC_RX_AUTO_NEG to debounce the partner's config word, then advances
 * @ap->state.  Returns one of the ANEG_* codes; ANEG_TIMER_ENAB means
 * "keep ticking", ANEG_FAILED/ANEG_DONE terminate the loop.
 *
 * NOTE(review): excerpt is line-sampled; declarations (rx_cfg_reg,
 * ret), udelay()s, break statements, the idle/ack-match bookkeeping
 * and some default branches are elided from view.
 */
1785 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1786 struct tg3_fiber_aneginfo *ap)
1788 unsigned long delta;
1792 if (ap->state == ANEG_STATE_UNKNOWN) {
1796 ap->ability_match_cfg = 0;
1797 ap->ability_match_count = 0;
1798 ap->ability_match = 0;
/* Sample the received config word; require two identical samples in a
 * row (ability_match_count) before trusting the partner's abilities.
 */
1804 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1805 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1807 if (rx_cfg_reg != ap->ability_match_cfg) {
1808 ap->ability_match_cfg = rx_cfg_reg;
1809 ap->ability_match = 0;
1810 ap->ability_match_count = 0;
1812 if (++ap->ability_match_count > 1) {
1813 ap->ability_match = 1;
1814 ap->ability_match_cfg = rx_cfg_reg;
1817 if (rx_cfg_reg & ANEG_CFG_ACK)
1825 ap->ability_match_cfg = 0;
1826 ap->ability_match_count = 0;
1827 ap->ability_match = 0;
1833 ap->rxconfig = rx_cfg_reg;
/* Main state dispatch. */
1837 case ANEG_STATE_UNKNOWN:
1838 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1839 ap->state = ANEG_STATE_AN_ENABLE;
1842 case ANEG_STATE_AN_ENABLE:
1843 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1844 if (ap->flags & MR_AN_ENABLE) {
1847 ap->ability_match_cfg = 0;
1848 ap->ability_match_count = 0;
1849 ap->ability_match = 0;
1853 ap->state = ANEG_STATE_RESTART_INIT;
1855 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1859 case ANEG_STATE_RESTART_INIT:
1860 ap->link_time = ap->cur_time;
1861 ap->flags &= ~(MR_NP_LOADED);
/* Send an all-zero config word to restart negotiation. */
1863 tw32(MAC_TX_AUTO_NEG, 0);
1864 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1865 tw32_f(MAC_MODE, tp->mac_mode);
1868 ret = ANEG_TIMER_ENAB;
1869 ap->state = ANEG_STATE_RESTART;
1872 case ANEG_STATE_RESTART:
1873 delta = ap->cur_time - ap->link_time;
1874 if (delta > ANEG_STATE_SETTLE_TIME) {
1875 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1877 ret = ANEG_TIMER_ENAB;
1881 case ANEG_STATE_DISABLE_LINK_OK:
1885 case ANEG_STATE_ABILITY_DETECT_INIT:
1886 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full duplex + symmetric pause in our config word. */
1887 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1888 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1889 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1890 tw32_f(MAC_MODE, tp->mac_mode);
1893 ap->state = ANEG_STATE_ABILITY_DETECT;
1896 case ANEG_STATE_ABILITY_DETECT:
1897 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1898 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1902 case ANEG_STATE_ACK_DETECT_INIT:
1903 ap->txconfig |= ANEG_CFG_ACK;
1904 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1905 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1906 tw32_f(MAC_MODE, tp->mac_mode);
1909 ap->state = ANEG_STATE_ACK_DETECT;
1912 case ANEG_STATE_ACK_DETECT:
1913 if (ap->ack_match != 0) {
1914 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1915 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1916 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1918 ap->state = ANEG_STATE_AN_ENABLE;
1920 } else if (ap->ability_match != 0 &&
1921 ap->rxconfig == 0) {
1922 ap->state = ANEG_STATE_AN_ENABLE;
1926 case ANEG_STATE_COMPLETE_ACK_INIT:
/* Reserved bits set in the partner's config word => protocol error. */
1927 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Translate the partner's config word into MR_LP_ADV_* flags. */
1931 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1932 MR_LP_ADV_HALF_DUPLEX |
1933 MR_LP_ADV_SYM_PAUSE |
1934 MR_LP_ADV_ASYM_PAUSE |
1935 MR_LP_ADV_REMOTE_FAULT1 |
1936 MR_LP_ADV_REMOTE_FAULT2 |
1937 MR_LP_ADV_NEXT_PAGE |
1940 if (ap->rxconfig & ANEG_CFG_FD)
1941 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1942 if (ap->rxconfig & ANEG_CFG_HD)
1943 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1944 if (ap->rxconfig & ANEG_CFG_PS1)
1945 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1946 if (ap->rxconfig & ANEG_CFG_PS2)
1947 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1948 if (ap->rxconfig & ANEG_CFG_RF1)
1949 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1950 if (ap->rxconfig & ANEG_CFG_RF2)
1951 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1952 if (ap->rxconfig & ANEG_CFG_NP)
1953 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1955 ap->link_time = ap->cur_time;
1957 ap->flags ^= (MR_TOGGLE_TX);
1958 if (ap->rxconfig & 0x0008)
1959 ap->flags |= MR_TOGGLE_RX;
1960 if (ap->rxconfig & ANEG_CFG_NP)
1961 ap->flags |= MR_NP_RX;
1962 ap->flags |= MR_PAGE_RX;
1964 ap->state = ANEG_STATE_COMPLETE_ACK;
1965 ret = ANEG_TIMER_ENAB;
1968 case ANEG_STATE_COMPLETE_ACK:
/* Partner went silent: start over. */
1969 if (ap->ability_match != 0 &&
1970 ap->rxconfig == 0) {
1971 ap->state = ANEG_STATE_AN_ENABLE;
1974 delta = ap->cur_time - ap->link_time;
1975 if (delta > ANEG_STATE_SETTLE_TIME) {
1976 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1977 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1979 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1980 !(ap->flags & MR_NP_RX)) {
1981 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1989 case ANEG_STATE_IDLE_DETECT_INIT:
1990 ap->link_time = ap->cur_time;
/* Stop transmitting config words; data idles should follow. */
1991 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1992 tw32_f(MAC_MODE, tp->mac_mode);
1995 ap->state = ANEG_STATE_IDLE_DETECT;
1996 ret = ANEG_TIMER_ENAB;
1999 case ANEG_STATE_IDLE_DETECT:
2000 if (ap->ability_match != 0 &&
2001 ap->rxconfig == 0) {
2002 ap->state = ANEG_STATE_AN_ENABLE;
2005 delta = ap->cur_time - ap->link_time;
2006 if (delta > ANEG_STATE_SETTLE_TIME) {
2007 /* XXX another gem from the Broadcom driver :( */
2008 ap->state = ANEG_STATE_LINK_OK;
2012 case ANEG_STATE_LINK_OK:
2013 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2017 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2018 /* ??? unimplemented */
2021 case ANEG_STATE_NEXT_PAGE_WAIT:
2022 /* ??? unimplemented */
/* fiber_autoneg() - run the software fiber autoneg state machine to
 * completion.
 *
 * Primes the MAC to send config words, then ticks
 * tg3_fiber_aneg_smachine() up to ~195000 iterations until it reports
 * ANEG_DONE or ANEG_FAILED.  The resulting MR_* flags are copied to
 * *@flags for the caller to resolve duplex/pause.  Returns nonzero
 * (success) only when the machine finished with AN complete, link OK
 * and full duplex advertised by the partner.
 *
 * NOTE(review): excerpt is line-sampled; udelay()s, the tick variable
 * declaration and the explicit return statements are elided.
 */
2033 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2036 struct tg3_fiber_aneginfo aninfo;
2037 int status = ANEG_FAILED;
2041 tw32_f(MAC_TX_AUTO_NEG, 0);
2043 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2044 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2047 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2050 memset(&aninfo, 0, sizeof(aninfo));
2051 aninfo.flags |= MR_AN_ENABLE;
2052 aninfo.state = ANEG_STATE_UNKNOWN;
2053 aninfo.cur_time = 0;
2055 while (++tick < 195000) {
2056 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2057 if (status == ANEG_DONE || status == ANEG_FAILED)
/* Done ticking: stop sending config words. */
2063 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2064 tw32_f(MAC_MODE, tp->mac_mode);
2067 *flags = aninfo.flags;
2069 if (status == ANEG_DONE &&
2070 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2071 MR_LP_ADV_FULL_DUPLEX)))
/* tg3_init_bcm8002() - one-time initialization of the BCM8002 SerDes
 * PHY via raw register writes.
 *
 * Skips the reset when the device is already initialized and synced.
 * The register numbers/values are vendor magic: PLL lock range, PHY
 * reset, channel selection, auto-lock/comdet enable, POR pulse, and
 * finally deselecting the channel register so the PHY ID can be read.
 *
 * NOTE(review): excerpt is line-sampled; the loop-body delays of the
 * two busy-wait loops and some udelay() calls are elided from view.
 */
2077 static void tg3_init_bcm8002(struct tg3 *tp)
2079 u32 mac_status = tr32(MAC_STATUS);
2082 /* Reset when initting first time or we have a link. */
2083 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2084 !(mac_status & MAC_STATUS_PCS_SYNCED))
2087 /* Set PLL lock range. */
2088 tg3_writephy(tp, 0x16, 0x8007);
2091 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2093 /* Wait for reset to complete. */
2094 /* XXX schedule_timeout() ... */
2095 for (i = 0; i < 500; i++)
2098 /* Config mode; select PMA/Ch 1 regs. */
2099 tg3_writephy(tp, 0x10, 0x8411);
2101 /* Enable auto-lock and comdet, select txclk for tx. */
2102 tg3_writephy(tp, 0x11, 0x0a10);
2104 tg3_writephy(tp, 0x18, 0x00a0);
2105 tg3_writephy(tp, 0x16, 0x41ff);
2107 /* Assert and deassert POR. */
2108 tg3_writephy(tp, 0x13, 0x0400);
2110 tg3_writephy(tp, 0x13, 0x0000);
2112 tg3_writephy(tp, 0x11, 0x0a50);
2114 tg3_writephy(tp, 0x11, 0x0a10);
2116 /* Wait for signal to stabilize */
2117 /* XXX schedule_timeout() ... */
2118 for (i = 0; i < 15000; i++)
2121 /* Deselect the channel register so we can read the PHYID
2124 tg3_writephy(tp, 0x10, 0x8011);
/* tg3_setup_fiber_hw_autoneg() - drive fiber link setup using the
 * chip's hardware SG_DIG autoneg engine.
 *
 * Programs SG_DIG_CTRL (0x01388400 = autoneg off, 0x81388400 base for
 * autoneg plus pause bits 11/12), applying the 5704-A0/A1 serdes_cfg
 * workaround where needed, then polls SG_DIG_STATUS for completion and
 * resolves pause flow control from bits 19/20.  Returns 1 when a link
 * was established, 0 otherwise.
 *
 * NOTE(review): excerpt is line-sampled; several brace/else lines,
 * udelay()s, remote_adv initialization and workaround bookkeeping are
 * elided from view.
 */
2127 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2129 u32 sg_dig_ctrl, sg_dig_status;
2130 u32 serdes_cfg, expected_sg_dig_ctrl;
2131 int workaround, port_a;
2132 int current_link_up;
2135 expected_sg_dig_ctrl = 0;
2138 current_link_up = 0;
/* 5704 A0/A1 need a serdes_cfg workaround; port A is identified via
 * TG3PCI_DUAL_MAC_CTRL.
 */
2140 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2141 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2143 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2146 serdes_cfg = tr32(MAC_SERDES_CFG) &
2147 ((1 << 23) | (1 << 22) | (1 << 21) | (1 << 20));
2150 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2152 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2153 if (sg_dig_ctrl & (1 << 31)) {
/* Autoneg currently enabled in hardware but not wanted: disable it. */
2155 u32 val = serdes_cfg;
2161 tw32_f(MAC_SERDES_CFG, val);
2163 tw32_f(SG_DIG_CTRL, 0x01388400);
2165 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2166 tg3_setup_flow_control(tp, 0, 0);
2167 current_link_up = 1;
2172 /* Want auto-negotiation. */
2173 expected_sg_dig_ctrl = 0x81388400;
2175 /* Pause capability */
2176 expected_sg_dig_ctrl |= (1 << 11);
2178 /* Asymettric pause */
2179 expected_sg_dig_ctrl |= (1 << 12);
2181 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2183 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011880);
/* Kick autoneg: write with restart bit (1 << 30), then without. */
2184 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2186 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2188 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2189 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2190 MAC_STATUS_SIGNAL_DET)) {
2193 /* Giver time to negotiate (~200ms) */
2194 for (i = 0; i < 40000; i++) {
2195 sg_dig_status = tr32(SG_DIG_STATUS);
2196 if (sg_dig_status & (0x3))
2200 mac_status = tr32(MAC_STATUS);
/* Autoneg completed (bit 1) with PCS sync: resolve pause config. */
2202 if ((sg_dig_status & (1 << 1)) &&
2203 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2204 u32 local_adv, remote_adv;
2206 local_adv = ADVERTISE_PAUSE_CAP;
2208 if (sg_dig_status & (1 << 19))
2209 remote_adv |= LPA_PAUSE_CAP;
2210 if (sg_dig_status & (1 << 20))
2211 remote_adv |= LPA_PAUSE_ASYM;
2213 tg3_setup_flow_control(tp, local_adv, remote_adv);
2214 current_link_up = 1;
2215 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2216 } else if (!(sg_dig_status & (1 << 1))) {
2217 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2218 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
/* Autoneg failed: fall back to forced mode and retest PCS sync. */
2221 u32 val = serdes_cfg;
2228 tw32_f(MAC_SERDES_CFG, val);
2231 tw32_f(SG_DIG_CTRL, 0x01388400);
2234 mac_status = tr32(MAC_STATUS);
2235 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2236 tg3_setup_flow_control(tp, 0, 0);
2237 current_link_up = 1;
2244 return current_link_up;
/* tg3_setup_fiber_by_hand() - fiber link setup without the hardware
 * SG_DIG engine, using the software state machine (fiber_autoneg).
 *
 * With autoneg enabled, runs fiber_autoneg() and derives pause flow
 * control from the partner's MR_LP_ADV_* flags, then waits for the
 * MAC status change bits to settle.  With autoneg disabled, the link
 * is simply forced to 1000FD.  Returns 1 when link is up, 0 otherwise.
 *
 * NOTE(review): excerpt is line-sampled; the early-return/else
 * structure, remote_adv initialization, udelay()s and some closing
 * braces are elided from view.
 */
2247 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2249 int current_link_up = 0;
2251 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2252 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2256 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2260 if (fiber_autoneg(tp, &flags)) {
2261 u32 local_adv, remote_adv;
2263 local_adv = ADVERTISE_PAUSE_CAP;
2265 if (flags & MR_LP_ADV_SYM_PAUSE)
2266 remote_adv |= LPA_PAUSE_CAP;
2267 if (flags & MR_LP_ADV_ASYM_PAUSE)
2268 remote_adv |= LPA_PAUSE_ASYM;
2270 tg3_setup_flow_control(tp, local_adv, remote_adv);
2272 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2273 current_link_up = 1;
/* Let the latched sync/config-changed status bits drain. */
2275 for (i = 0; i < 30; i++) {
2278 (MAC_STATUS_SYNC_CHANGED |
2279 MAC_STATUS_CFG_CHANGED));
2281 if ((tr32(MAC_STATUS) &
2282 (MAC_STATUS_SYNC_CHANGED |
2283 MAC_STATUS_CFG_CHANGED)) == 0)
2287 mac_status = tr32(MAC_STATUS);
2288 if (current_link_up == 0 &&
2289 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2290 !(mac_status & MAC_STATUS_RCVD_CFG))
2291 current_link_up = 1;
2293 /* Forcing 1000FD link up. */
2294 current_link_up = 1;
2295 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2297 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2302 return current_link_up;
/* tg3_setup_fiber_phy() - top-level fiber (TBI) link bring-up.
 *
 * Snapshots the current pause/speed/duplex config, fast-paths the case
 * where the link is already synced and unchanged, programs the MAC
 * into TBI port mode, runs either the hardware (SG_DIG) or software
 * autoneg path, then updates LED control, carrier state and logs any
 * change in link or flow-control configuration.
 *
 * NOTE(review): excerpt is line-sampled; declarations (orig_pause_cfg,
 * now_pause_cfg, mac_status, i), udelay()s, an early-return in the
 * fast path and some closing braces are elided from view.
 */
2305 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2308 u16 orig_active_speed;
2309 u8 orig_active_duplex;
2311 int current_link_up;
/* Remember previous pause/speed/duplex to detect config changes. */
2315 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2316 TG3_FLAG_TX_PAUSE));
2317 orig_active_speed = tp->link_config.active_speed;
2318 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path: software autoneg, carrier already up, init complete, and
 * the MAC reports synced+signal with nothing changed.
 */
2320 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2321 netif_carrier_ok(tp->dev) &&
2322 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2323 mac_status = tr32(MAC_STATUS);
2324 mac_status &= (MAC_STATUS_PCS_SYNCED |
2325 MAC_STATUS_SIGNAL_DET |
2326 MAC_STATUS_CFG_CHANGED |
2327 MAC_STATUS_RCVD_CFG);
2328 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2329 MAC_STATUS_SIGNAL_DET)) {
2330 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2331 MAC_STATUS_CFG_CHANGED));
2336 tw32_f(MAC_TX_AUTO_NEG, 0);
2338 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2339 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2340 tw32_f(MAC_MODE, tp->mac_mode);
2343 if (tp->phy_id == PHY_ID_BCM8002)
2344 tg3_init_bcm8002(tp);
2346 /* Enable link change event even when serdes polling. */
2347 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2350 current_link_up = 0;
2351 mac_status = tr32(MAC_STATUS);
2353 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2354 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2356 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2358 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2359 tw32_f(MAC_MODE, tp->mac_mode);
2362 tp->hw_status->status =
2363 (SD_STATUS_UPDATED |
2364 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
/* Drain the latched sync/config-changed bits (bounded retry). */
2366 for (i = 0; i < 100; i++) {
2367 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2368 MAC_STATUS_CFG_CHANGED));
2370 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2371 MAC_STATUS_CFG_CHANGED)) == 0)
2375 mac_status = tr32(MAC_STATUS);
2376 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2377 current_link_up = 0;
2378 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2379 tw32_f(MAC_MODE, (tp->mac_mode |
2380 MAC_MODE_SEND_CONFIGS));
2382 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber link is always 1000FD when up; drive the LEDs accordingly. */
2386 if (current_link_up == 1) {
2387 tp->link_config.active_speed = SPEED_1000;
2388 tp->link_config.active_duplex = DUPLEX_FULL;
2389 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2390 LED_CTRL_LNKLED_OVERRIDE |
2391 LED_CTRL_1000MBPS_ON));
2393 tp->link_config.active_speed = SPEED_INVALID;
2394 tp->link_config.active_duplex = DUPLEX_INVALID;
2395 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2396 LED_CTRL_LNKLED_OVERRIDE |
2397 LED_CTRL_TRAFFIC_OVERRIDE));
2400 if (current_link_up != netif_carrier_ok(tp->dev)) {
2401 if (current_link_up)
2402 netif_carrier_on(tp->dev);
2404 netif_carrier_off(tp->dev);
2405 tg3_link_report(tp);
/* Also report when only the pause/speed/duplex configuration moved. */
2408 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2410 if (orig_pause_cfg != now_pause_cfg ||
2411 orig_active_speed != tp->link_config.active_speed ||
2412 orig_active_duplex != tp->link_config.active_duplex)
2413 tg3_link_report(tp);
/* tg3_setup_phy() - dispatch link setup to the fiber or copper path,
 * then apply link-dependent MAC tuning.
 *
 * After the PHY-specific setup, MAC_TX_LENGTHS gets a larger slot time
 * (0xff) for 1000/half (half-duplex gigabit needs the extended slot
 * time), and statistics-coalescing ticks are enabled only while the
 * carrier is up on chips that are not 5705/5750.
 *
 * NOTE(review): excerpt is line-sampled; the err declaration and
 * return statement are elided from view.
 */
2419 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2423 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2424 err = tg3_setup_fiber_phy(tp, force_reset);
2426 err = tg3_setup_copper_phy(tp, force_reset);
2429 if (tp->link_config.active_speed == SPEED_1000 &&
2430 tp->link_config.active_duplex == DUPLEX_HALF)
2431 tw32(MAC_TX_LENGTHS,
2432 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2433 (6 << TX_LENGTHS_IPG_SHIFT) |
2434 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2436 tw32(MAC_TX_LENGTHS,
2437 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2438 (6 << TX_LENGTHS_IPG_SHIFT) |
2439 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2441 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2442 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2443 if (netif_carrier_ok(tp->dev)) {
2444 tw32(HOSTCC_STAT_COAL_TICKS,
2445 DEFAULT_STAT_COAL_TICKS);
2447 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2454 /* Tigon3 never reports partial packet sends. So we do not
2455 * need special logic to handle SKBs that have not had all
2456 * of their frags sent yet, like SunGEM does.
/* tg3_tx() - reclaim completed TX descriptors.
 *
 * Walks the software consumer index up to the hardware consumer index
 * from the status block, DMA-unmapping the head buffer and each frag
 * of every completed skb before freeing it (dev_kfree_skb_irq: this
 * runs in interrupt context).  Wakes the queue once enough descriptors
 * are free.
 *
 * NOTE(review): excerpt is line-sampled; the skb_headlen/len argument
 * of pci_unmap_single, error/BUG paths and closing braces are elided.
 */
2458 static void tg3_tx(struct tg3 *tp)
2460 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2461 u32 sw_idx = tp->tx_cons;
2463 while (sw_idx != hw_idx) {
2464 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2465 struct sk_buff *skb = ri->skb;
2468 if (unlikely(skb == NULL))
2471 pci_unmap_single(tp->pdev,
2472 pci_unmap_addr(ri, mapping),
2478 sw_idx = NEXT_TX(sw_idx);
/* One descriptor per fragment follows the head descriptor. */
2480 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2481 if (unlikely(sw_idx == hw_idx))
2484 ri = &tp->tx_buffers[sw_idx];
2485 if (unlikely(ri->skb != NULL))
2488 pci_unmap_page(tp->pdev,
2489 pci_unmap_addr(ri, mapping),
2490 skb_shinfo(skb)->frags[i].size,
2493 sw_idx = NEXT_TX(sw_idx);
2496 dev_kfree_skb_irq(skb);
2499 tp->tx_cons = sw_idx;
2501 if (netif_queue_stopped(tp->dev) &&
2502 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2503 netif_wake_queue(tp->dev);
2506 /* Returns size of skb allocated or < 0 on error.
2508 * We only need to fill in the address because the other members
2509 * of the RX descriptor are invariant, see tg3_init_rings.
2511 * Note the purposeful assymetry of cpu vs. chip accesses. For
2512 * posting buffers we only dirty the first cache line of the RX
2513 * descriptor (containing the address). Whereas for the RX status
2514 * buffers the cpu only reads the last cacheline of the RX descriptor
2515 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* tg3_alloc_rx_skb() - allocate and DMA-map a fresh RX skb for the
 * standard or jumbo producer ring slot @dest_idx_unmasked.
 *
 * @opaque_key selects std vs. jumbo ring (and thus buffer size);
 * @src_idx >= 0 identifies a source slot whose skb pointer is cleared
 * once the new buffer is committed.  On failure nothing is modified
 * (callers rely on that, per the comment at line 2550).  Returns the
 * allocated size, or < 0 on error (per the header comment above).
 *
 * NOTE(review): excerpt is line-sampled; the default switch case, the
 * NULL-check/return after dev_alloc_skb, skb->dev assignment and
 * map->skb store are elided from view.
 */
2517 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2518 int src_idx, u32 dest_idx_unmasked)
2520 struct tg3_rx_buffer_desc *desc;
2521 struct ring_info *map, *src_map;
2522 struct sk_buff *skb;
2524 int skb_size, dest_idx;
2527 switch (opaque_key) {
2528 case RXD_OPAQUE_RING_STD:
2529 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2530 desc = &tp->rx_std[dest_idx];
2531 map = &tp->rx_std_buffers[dest_idx];
2533 src_map = &tp->rx_std_buffers[src_idx];
2534 skb_size = RX_PKT_BUF_SZ;
2537 case RXD_OPAQUE_RING_JUMBO:
2538 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2539 desc = &tp->rx_jumbo[dest_idx];
2540 map = &tp->rx_jumbo_buffers[dest_idx];
2542 src_map = &tp->rx_jumbo_buffers[src_idx];
2543 skb_size = RX_JUMBO_PKT_BUF_SZ;
2550 /* Do not overwrite any of the map or rp information
2551 * until we are sure we can commit to a new buffer.
2553 * Callers depend upon this behavior and assume that
2554 * we leave everything unchanged if we fail.
2556 skb = dev_alloc_skb(skb_size);
2561 skb_reserve(skb, tp->rx_offset);
2563 mapping = pci_map_single(tp->pdev, skb->data,
2564 skb_size - tp->rx_offset,
2565 PCI_DMA_FROMDEVICE);
2568 pci_unmap_addr_set(map, mapping, mapping);
2570 if (src_map != NULL)
2571 src_map->skb = NULL;
/* Publish the 64-bit DMA address into the ring descriptor. */
2573 desc->addr_hi = ((u64)mapping >> 32);
2574 desc->addr_lo = ((u64)mapping & 0xffffffff);
2579 /* We only need to move over in the address because the other
2580 * members of the RX descriptor are invariant. See notes above
2581 * tg3_alloc_rx_skb for full details.
/* tg3_recycle_rx() - re-post an already-mapped RX buffer.
 *
 * Moves the skb pointer, the saved DMA mapping, and the descriptor
 * address from slot @src_idx to the producer slot @dest_idx_unmasked
 * on the selected (std/jumbo) ring, without allocating or remapping.
 * Used when a packet is dropped/copied and its buffer can be reused.
 *
 * NOTE(review): excerpt is line-sampled; the default switch case and
 * the break statements are elided from view.
 */
2583 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2584 int src_idx, u32 dest_idx_unmasked)
2586 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2587 struct ring_info *src_map, *dest_map;
2590 switch (opaque_key) {
2591 case RXD_OPAQUE_RING_STD:
2592 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2593 dest_desc = &tp->rx_std[dest_idx];
2594 dest_map = &tp->rx_std_buffers[dest_idx];
2595 src_desc = &tp->rx_std[src_idx];
2596 src_map = &tp->rx_std_buffers[src_idx];
2599 case RXD_OPAQUE_RING_JUMBO:
2600 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2601 dest_desc = &tp->rx_jumbo[dest_idx];
2602 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2603 src_desc = &tp->rx_jumbo[src_idx];
2604 src_map = &tp->rx_jumbo_buffers[src_idx];
/* Transfer ownership: skb, unmap cookie, then the DMA address words. */
2611 dest_map->skb = src_map->skb;
2612 pci_unmap_addr_set(dest_map, mapping,
2613 pci_unmap_addr(src_map, mapping));
2614 dest_desc->addr_hi = src_desc->addr_hi;
2615 dest_desc->addr_lo = src_desc->addr_lo;
2617 src_map->skb = NULL;
/* Hand a received skb with a hardware-extracted VLAN tag to the stack.
 * Compiled only when VLAN support is configured (TG3_VLAN_TAG_USED).
 */
2620 #if TG3_VLAN_TAG_USED
2621 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2623 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2627 /* The RX ring scheme is composed of multiple rings which post fresh
2628 * buffers to the chip, and one special ring the chip uses to report
2629 * status back to the host.
2631 * The special ring reports the status of received packets to the
2632 * host. The chip does not write into the original descriptor the
2633 * RX buffer was obtained from. The chip simply takes the original
2634 * descriptor as provided by the host, updates the status and length
2635 * field, then writes this into the next status ring entry.
2637 * Each ring the host uses to post buffers to the chip is described
2638 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2639 * it is first placed into the on-chip ram. When the packet's length
2640 * is known, it walks down the TG3_BDINFO entries to select the ring.
2641 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2642 * which is within the range of the new packet's length is chosen.
2644 * The "separate ring for rx status" scheme may sound queer, but it makes
2645 * sense from a cache coherency perspective. If only the host writes
2646 * to the buffer post rings, and only the chip writes to the rx status
2647 * rings, then cache lines never move beyond shared-modified state.
2648 * If both the host and chip were to write into the same ring, cache line
2649 * eviction could occur since both entities want it in an exclusive state.
/* Process up to 'budget' packets from the RX return (status) ring.
 * For each completed descriptor: large frames have their buffer unmapped
 * and passed up directly (a fresh skb is allocated to replace them);
 * small frames (<= RX_COPY_THRESHOLD) are copied into a new skb and the
 * original ring buffer is recycled.  On exit the return-ring consumer
 * index is ACKed and any producer ring we took buffers from is refilled.
 * Returns the number of packets processed (inferred from the budget
 * accounting in tg3_poll — confirm against full source).
 */
2651 static int tg3_rx(struct tg3 *tp, int budget)
2654 u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2658 hw_idx = tp->hw_status->idx[0].rx_producer;
2660 * We need to order the read of hw_idx and the read of
2661 * the opaque cookie.
2664 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2667 while (sw_idx != hw_idx && budget > 0) {
2668 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2670 struct sk_buff *skb;
2671 dma_addr_t dma_addr;
2672 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie encodes which producer ring the buffer came from
 * and its index within that ring.
 */
2674 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2675 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2676 if (opaque_key == RXD_OPAQUE_RING_STD) {
2677 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2679 skb = tp->rx_std_buffers[desc_idx].skb;
2680 post_ptr = &tp->rx_std_ptr;
2681 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2682 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2684 skb = tp->rx_jumbo_buffers[desc_idx].skb;
2685 post_ptr = &tp->rx_jumbo_ptr;
2688 goto next_pkt_nopost;
2691 work_mask |= opaque_key;
/* Hardware-flagged receive error (except the MII odd-nibble case):
 * recycle the buffer and count the drop.
 */
2693 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2694 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2696 tg3_recycle_rx(tp, opaque_key,
2697 desc_idx, *post_ptr);
2699 /* Other statistics kept track of by card. */
2700 tp->net_stats.rx_dropped++;
2704 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
/* Large frame: replace the ring buffer with a new skb and pass the
 * original straight up the stack.
 */
2706 if (len > RX_COPY_THRESHOLD) {
2709 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2710 desc_idx, *post_ptr);
2714 pci_unmap_single(tp->pdev, dma_addr,
2715 skb_size - tp->rx_offset,
2716 PCI_DMA_FROMDEVICE);
/* Small frame: copy into a fresh skb (reserving 2 bytes so the IP
 * header is 16-byte aligned) and keep the original ring buffer.
 */
2720 struct sk_buff *copy_skb;
2722 tg3_recycle_rx(tp, opaque_key,
2723 desc_idx, *post_ptr);
2725 copy_skb = dev_alloc_skb(len + 2);
2726 if (copy_skb == NULL)
2727 goto drop_it_no_recycle;
2729 copy_skb->dev = tp->dev;
2730 skb_reserve(copy_skb, 2);
2731 skb_put(copy_skb, len);
2732 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2733 memcpy(copy_skb->data, skb->data, len);
2734 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2736 /* We'll reuse the original ring buffer. */
/* Report checksum-unnecessary only if RX checksumming is enabled and
 * the chip verified the TCP/UDP checksum (result 0xffff).
 */
2740 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2741 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2742 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2743 >> RXD_TCPCSUM_SHIFT) == 0xffff))
2744 skb->ip_summed = CHECKSUM_UNNECESSARY;
2746 skb->ip_summed = CHECKSUM_NONE;
2748 skb->protocol = eth_type_trans(skb, tp->dev);
2749 #if TG3_VLAN_TAG_USED
2750 if (tp->vlgrp != NULL &&
2751 desc->type_flags & RXD_FLAG_VLAN) {
2752 tg3_vlan_rx(tp, skb,
2753 desc->err_vlan & RXD_VLAN_MASK);
2756 netif_receive_skb(skb);
2758 tp->dev->last_rx = jiffies;
2766 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2769 /* ACK the status ring. */
2770 tp->rx_rcb_ptr = rx_rcb_ptr;
2771 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2772 (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2774 /* Refill RX ring(s). */
2775 if (work_mask & RXD_OPAQUE_RING_STD) {
2776 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2777 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2780 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2781 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2782 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/* NAPI poll callback (dev->poll).  Handles link-change events and TX
 * completions under tp->lock, then runs tg3_rx() within the NAPI budget
 * without the lock (RX is serialized by the NAPI poll contract itself).
 * Returns 0 when all work is done (poll removed from the list and chip
 * interrupts restarted), 1 when more work remains.
 */
2790 static int tg3_poll(struct net_device *netdev, int *budget)
2792 struct tg3 *tp = netdev_priv(netdev);
2793 struct tg3_hw_status *sblk = tp->hw_status;
2794 unsigned long flags;
2797 spin_lock_irqsave(&tp->lock, flags);
2799 /* handle link change and other phy events */
2800 if (!(tp->tg3_flags &
2801 (TG3_FLAG_USE_LINKCHG_REG |
2802 TG3_FLAG_POLL_SERDES))) {
2803 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-change bit (keeping UPDATED set) before re-running
 * PHY setup, so a new event is not lost.
 */
2804 sblk->status = SD_STATUS_UPDATED |
2805 (sblk->status & ~SD_STATUS_LINK_CHG);
2806 tg3_setup_phy(tp, 0);
2810 /* run TX completion thread */
2811 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2812 spin_lock(&tp->tx_lock);
2814 spin_unlock(&tp->tx_lock);
2817 spin_unlock_irqrestore(&tp->lock, flags);
2819 /* run RX thread, within the bounds set by NAPI.
2820 * All RX "locking" is done by ensuring outside
2821 * code synchronizes with dev->poll()
2824 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2825 int orig_budget = *budget;
/* Never process more than the per-device quota in one pass. */
2828 if (orig_budget > netdev->quota)
2829 orig_budget = netdev->quota;
2831 work_done = tg3_rx(tp, orig_budget);
2833 *budget -= work_done;
2834 netdev->quota -= work_done;
2836 if (work_done >= orig_budget)
2840 /* if no more work, tell net stack and NIC we're done */
2842 spin_lock_irqsave(&tp->lock, flags);
2843 __netif_rx_complete(netdev);
2844 tg3_restart_ints(tp);
2845 spin_unlock_irqrestore(&tp->lock, flags);
2848 return (done ? 0 : 1);
/* Check the status block for pending work: a PHY/link-change event (when
 * link changes are status-block driven) or RX/TX indices that have moved
 * past our software pointers.  Returns nonzero if the NAPI poll should run.
 */
2851 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2853 struct tg3_hw_status *sblk = tp->hw_status;
2854 unsigned int work_exists = 0;
2856 /* check for phy events */
2857 if (!(tp->tg3_flags &
2858 (TG3_FLAG_USE_LINKCHG_REG |
2859 TG3_FLAG_POLL_SERDES))) {
2860 if (sblk->status & SD_STATUS_LINK_CHG)
2863 /* check for RX/TX work to do */
2864 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2865 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
/* Hardware interrupt handler.  If the status block says an update is
 * pending, mask further chip interrupts via intr-mbox-0, flush the PCI
 * write, and schedule the NAPI poll if there is real work; otherwise
 * immediately re-enable interrupts (possible shared-IRQ spurious entry).
 */
2871 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2873 struct net_device *dev = dev_id;
2874 struct tg3 *tp = netdev_priv(dev);
2875 struct tg3_hw_status *sblk = tp->hw_status;
2876 unsigned long flags;
2877 unsigned int handled = 1;
2879 spin_lock_irqsave(&tp->lock, flags);
2881 if (sblk->status & SD_STATUS_UPDATED) {
2883 * writing any value to intr-mbox-0 clears PCI INTA# and
2884 * chip-internal interrupt pending events.
2885 * writing non-zero to intr-mbox-0 additional tells the
2886 * NIC to stop sending us irqs, engaging "in-intr-handler"
2889 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2892 * Flush PCI write. This also guarantees that our
2893 * status block has been flushed to host memory.
2895 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2896 sblk->status &= ~SD_STATUS_UPDATED;
2898 if (likely(tg3_has_work(dev, tp)))
2899 netif_rx_schedule(dev); /* schedule NAPI poll */
2901 /* no work, shared interrupt perhaps? re-enable
2902 * interrupts, and flush that PCI write
2904 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2906 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2908 } else { /* shared interrupt */
2912 spin_unlock_irqrestore(&tp->lock, flags);
2914 return IRQ_RETVAL(handled);
2917 static int tg3_init_hw(struct tg3 *);
2918 static int tg3_halt(struct tg3 *);
2920 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: synthesize an interrupt so netconsole/kgdb-over-ethernet
 * can make progress with normal interrupts disabled.
 */
2921 static void tg3_poll_controller(struct net_device *dev)
2923 tg3_interrupt(dev->irq, dev, NULL);
/* Workqueue handler scheduled from tg3_tx_timeout: resets and
 * reinitializes the chip under tp->lock/tp->tx_lock, restarts the netif
 * queue, and optionally re-arms tp->timer if a restart was pending.
 */
2927 static void tg3_reset_task(void *_data)
2929 struct tg3 *tp = _data;
2930 unsigned int restart_timer;
2934 spin_lock_irq(&tp->lock);
2935 spin_lock(&tp->tx_lock);
/* Latch and clear the restart-timer request before the reset. */
2937 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2938 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2943 tg3_netif_start(tp);
2945 spin_unlock(&tp->tx_lock);
2946 spin_unlock_irq(&tp->lock);
2949 mod_timer(&tp->timer, jiffies + 1);
/* dev->tx_timeout hook: log the stall and defer the full chip reset to
 * process context via the reset_task workqueue item.
 */
2952 static void tg3_tx_timeout(struct net_device *dev)
2954 struct tg3 *tp = netdev_priv(dev);
2956 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2959 schedule_work(&tp->reset_task);
2962 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
/* Workaround for the hardware bug where a DMA buffer crosses a 4GB
 * boundary (see tg3_4g_overflow_test): linearize the skb with skb_copy,
 * map the single linear copy, emit one TX descriptor for it, then unmap
 * and release the original per-fragment ring entries.  *start is advanced
 * past the replacement descriptor.  Return value semantics depend on
 * elided lines — presumably 0 on success, nonzero on alloc/map failure.
 */
2964 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2965 u32 guilty_entry, int guilty_len,
2966 u32 last_plus_one, u32 *start, u32 mss)
2968 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2969 dma_addr_t new_addr;
2978 /* New SKB is guaranteed to be linear. */
2980 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2982 tg3_set_txd(tp, entry, new_addr, new_skb->len,
2983 (skb->ip_summed == CHECKSUM_HW) ?
2984 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2985 *start = NEXT_TX(entry);
2987 /* Now clean up the sw ring entries. */
2989 while (entry != last_plus_one) {
/* First entry holds the linear head; later ones hold page fragments. */
2993 len = skb_headlen(skb);
2995 len = skb_shinfo(skb)->frags[i-1].size;
2996 pci_unmap_single(tp->pdev,
2997 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2998 len, PCI_DMA_TODEVICE);
3000 tp->tx_buffers[entry].skb = new_skb;
3001 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3003 tp->tx_buffers[entry].skb = NULL;
3005 entry = NEXT_TX(entry);
/* Fill one TX hardware descriptor.  mss_and_is_end packs the MSS in the
 * upper bits with the "last fragment" flag in bit 0.  When TXD_FLAG_VLAN
 * is set, the VLAN tag is carried in the upper 16 bits of 'flags'.
 */
3013 static void tg3_set_txd(struct tg3 *tp, int entry,
3014 dma_addr_t mapping, int len, u32 flags,
3017 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3018 int is_end = (mss_and_is_end & 0x1);
3019 u32 mss = (mss_and_is_end >> 1);
3023 flags |= TXD_FLAG_END;
3024 if (flags & TXD_FLAG_VLAN) {
3025 vlan_tag = flags >> 16;
3028 vlan_tag |= (mss << TXD_MSS_SHIFT);
/* Split the (possibly 64-bit) DMA address across the two address words. */
3030 txd->addr_hi = ((u64) mapping >> 32);
3031 txd->addr_lo = ((u64) mapping & 0xffffffff);
3032 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3033 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
/* Detect the hardware erratum condition where a DMA region of 'len'
 * bytes (plus slack) would wrap across a 4GB address boundary: true if
 * base + len + 8 overflows the low 32 bits of the mapping.
 */
3036 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3038 u32 base = (u32) mapping & 0xffffffff;
3040 return ((base > 0xffffdcc0) &&
3041 (base + len + 8 < base));
/* hard_start_xmit: map the skb head and all page fragments into TX
 * descriptors, applying checksum-offload, TSO and VLAN flags, then kick
 * the chip via the send-host producer mailbox.  If any mapping would
 * trip the 4GB-boundary hardware bug, the whole packet is re-emitted
 * linearized through tigon3_4gb_hwbug_workaround.  Uses trylock on
 * tp->tx_lock (LLTX-style) and returns NETDEV_TX_LOCKED on contention.
 */
3044 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3046 struct tg3 *tp = netdev_priv(dev);
3049 u32 len, entry, base_flags, mss;
3050 int would_hit_hwbug;
3051 unsigned long flags;
3053 len = skb_headlen(skb);
3055 /* No BH disabling for tx_lock here. We are running in BH disabled
3056 * context and TX reclaim runs via tp->poll inside of a software
3057 * interrupt. Rejoice!
3059 * Actually, things are not so simple. If we are to take a hw
3060 * IRQ here, we can deadlock, consider:
3069 * spin on tp->tx_lock
3071 * So we really do need to disable interrupts when taking
3074 local_irq_save(flags);
3075 if (!spin_trylock(&tp->tx_lock)) {
3076 local_irq_restore(flags);
3077 return NETDEV_TX_LOCKED;
3080 /* This is a hard error, log it. */
3081 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3082 netif_stop_queue(dev);
3083 spin_unlock_irqrestore(&tp->tx_lock, flags);
3084 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3086 return NETDEV_TX_BUSY;
3089 entry = tp->tx_prod;
3091 if (skb->ip_summed == CHECKSUM_HW)
3092 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3093 #if TG3_TSO_SUPPORT != 0
/* TSO path: precompute the pseudo-header checksum and encode the IP/TCP
 * header-length options the chip needs for segmentation.
 */
3095 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3096 (mss = skb_shinfo(skb)->tso_size) != 0) {
3097 int tcp_opt_len, ip_tcp_len;
3099 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3100 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3102 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3103 TXD_FLAG_CPU_POST_DMA);
3105 skb->nh.iph->check = 0;
3106 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3107 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* 5705 encodes header-option flags in the mss field; other chips put
 * them in base_flags (different bit positions).
 */
3111 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3112 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3115 tsflags = ((skb->nh.iph->ihl - 5) +
3116 (tcp_opt_len >> 2));
3117 mss |= (tsflags << 11);
3120 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3123 tsflags = ((skb->nh.iph->ihl - 5) +
3124 (tcp_opt_len >> 2));
3125 base_flags |= tsflags << 12;
3132 #if TG3_VLAN_TAG_USED
3133 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3134 base_flags |= (TXD_FLAG_VLAN |
3135 (vlan_tx_tag_get(skb) << 16));
3138 /* Queue skb data, a.k.a. the main skb fragment. */
3139 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3141 tp->tx_buffers[entry].skb = skb;
3142 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3144 would_hit_hwbug = 0;
/* Remember entry+1 (not entry) so 0 can mean "no bug hit". */
3146 if (tg3_4g_overflow_test(mapping, len))
3147 would_hit_hwbug = entry + 1;
3149 tg3_set_txd(tp, entry, mapping, len, base_flags,
3150 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3152 entry = NEXT_TX(entry);
3154 /* Now loop through additional data fragments, and queue them. */
3155 if (skb_shinfo(skb)->nr_frags > 0) {
3156 unsigned int i, last;
3158 last = skb_shinfo(skb)->nr_frags - 1;
3159 for (i = 0; i <= last; i++) {
3160 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3163 mapping = pci_map_page(tp->pdev,
3166 len, PCI_DMA_TODEVICE);
3168 tp->tx_buffers[entry].skb = NULL;
3169 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3171 if (tg3_4g_overflow_test(mapping, len)) {
3172 /* Only one should match. */
3173 if (would_hit_hwbug)
3175 would_hit_hwbug = entry + 1;
/* Only 5750 honours MSS bits on non-first descriptors. */
3178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3179 tg3_set_txd(tp, entry, mapping, len,
3180 base_flags, (i == last)|(mss << 1));
3182 tg3_set_txd(tp, entry, mapping, len,
3183 base_flags, (i == last));
3185 entry = NEXT_TX(entry);
/* 4GB-boundary bug hit: rewind to the packet's first descriptor and
 * hand everything to the linearizing workaround.
 */
3189 if (would_hit_hwbug) {
3190 u32 last_plus_one = entry;
3192 unsigned int len = 0;
3194 would_hit_hwbug -= 1;
3195 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3196 entry &= (TG3_TX_RING_SIZE - 1);
3199 while (entry != last_plus_one) {
3201 len = skb_headlen(skb);
3203 len = skb_shinfo(skb)->frags[i-1].size;
3205 if (entry == would_hit_hwbug)
3209 entry = NEXT_TX(entry);
3213 /* If the workaround fails due to memory/mapping
3214 * failure, silently drop this packet.
3216 if (tigon3_4gb_hwbug_workaround(tp, skb,
3225 /* Packets are ready, update Tx producer idx local and on card. */
3226 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3228 tp->tx_prod = entry;
/* Stop the queue while fewer free descriptors remain than a maximally
 * fragmented packet could need.
 */
3229 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3230 netif_stop_queue(dev);
3234 spin_unlock_irqrestore(&tp->tx_lock, flags);
3236 dev->trans_start = jiffies;
3238 return NETDEV_TX_OK;
/* Record the new MTU and toggle the jumbo-frames flag when it exceeds
 * the standard Ethernet payload size.
 */
3241 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3246 if (new_mtu > ETH_DATA_LEN)
3247 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3249 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
/* dev->change_mtu hook.  Validates the range (max depends on whether the
 * chip supports jumbo frames — see TG3_MAX_MTU).  If the interface is
 * down, just store the new MTU; otherwise reinitialize the chip under
 * tp->lock/tp->tx_lock so the rings are resized.
 */
3252 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3254 struct tg3 *tp = netdev_priv(dev);
3256 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3259 if (!netif_running(dev)) {
3260 /* We'll just catch it later when the
3263 tg3_set_mtu(dev, tp, new_mtu);
3268 spin_lock_irq(&tp->lock);
3269 spin_lock(&tp->tx_lock);
3273 tg3_set_mtu(dev, tp, new_mtu);
3277 tg3_netif_start(tp);
3279 spin_unlock(&tp->tx_lock);
3280 spin_unlock_irq(&tp->lock);
3285 /* Free up pending packets in all rx/tx rings.
3287 * The chip has been shut down and the driver detached from
3288 * the networking, so no interrupts or new tx packets will
3289 * end up in the driver. tp->{tx,}lock is not held and we are not
3290 * in an interrupt context and thus may sleep.
/* Unmap and free every pending skb in the standard RX, jumbo RX and TX
 * rings.  Caller guarantees the chip is quiesced and no locks are held
 * (see the comment block above); dev_kfree_skb_any is used so this also
 * tolerates being called from non-sleepable contexts.
 */
3292 static void tg3_free_rings(struct tg3 *tp)
3294 struct ring_info *rxp;
3297 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3298 rxp = &tp->rx_std_buffers[i];
3300 if (rxp->skb == NULL)
3302 pci_unmap_single(tp->pdev,
3303 pci_unmap_addr(rxp, mapping),
3304 RX_PKT_BUF_SZ - tp->rx_offset,
3305 PCI_DMA_FROMDEVICE);
3306 dev_kfree_skb_any(rxp->skb);
3310 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3311 rxp = &tp->rx_jumbo_buffers[i];
3313 if (rxp->skb == NULL)
3315 pci_unmap_single(tp->pdev,
3316 pci_unmap_addr(rxp, mapping),
3317 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3318 PCI_DMA_FROMDEVICE);
3319 dev_kfree_skb_any(rxp->skb);
/* TX ring: one skb may span several entries (head + page frags), so the
 * index advances inside the loop body rather than in the for-clause.
 */
3323 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3324 struct tx_ring_info *txp;
3325 struct sk_buff *skb;
3328 txp = &tp->tx_buffers[i];
3336 pci_unmap_single(tp->pdev,
3337 pci_unmap_addr(txp, mapping),
3344 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3345 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3346 pci_unmap_page(tp->pdev,
3347 pci_unmap_addr(txp, mapping),
3348 skb_shinfo(skb)->frags[j].size,
3353 dev_kfree_skb_any(skb);
3357 /* Initialize tx/rx rings for packet processing.
3359 * The chip has been shut down and the driver detached from
3360 * the networking, so no interrupts or new tx packets will
3361 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset all rings to a pristine state: free old skbs, zero every
 * descriptor, write the invariant fields of the RX producer descriptors
 * (length, flags, opaque ring/index cookie), then allocate fresh RX
 * buffers up to rx_pending / rx_jumbo_pending.  tp->{tx,}lock held.
 */
3364 static void tg3_init_rings(struct tg3 *tp)
3368 /* Free up all the SKBs. */
3371 /* Zero out all descriptors. */
3372 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3373 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3374 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3375 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3377 /* Initialize invariants of the rings, we only set this
3378 * stuff once. This works because the card does not
3379 * write into the rx buffer posting rings.
3381 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3382 struct tg3_rx_buffer_desc *rxd;
3384 rxd = &tp->rx_std[i];
3385 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3387 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3388 rxd->opaque = (RXD_OPAQUE_RING_STD |
3389 (i << RXD_OPAQUE_INDEX_SHIFT));
/* Jumbo ring invariants only matter when jumbo frames are enabled. */
3392 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3393 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3394 struct tg3_rx_buffer_desc *rxd;
3396 rxd = &tp->rx_jumbo[i];
3397 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3399 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3401 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3402 (i << RXD_OPAQUE_INDEX_SHIFT));
3406 /* Now allocate fresh SKBs for each rx ring. */
3407 for (i = 0; i < tp->rx_pending; i++) {
3408 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3413 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3414 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3415 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3423 * Must not be invoked with interrupt sources disabled and
3424 * the hardware shut down.
/* Release everything tg3_alloc_consistent acquired: the single kmalloc
 * backing the three buffer-info arrays, the four DMA-consistent rings,
 * the hardware status block and the hardware statistics block.  Each
 * pointer is NULLed after freeing so the function is safe to call on a
 * partially-allocated device.
 */
3426 static void tg3_free_consistent(struct tg3 *tp)
3428 if (tp->rx_std_buffers) {
3429 kfree(tp->rx_std_buffers);
3430 tp->rx_std_buffers = NULL;
3433 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3434 tp->rx_std, tp->rx_std_mapping);
3438 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3439 tp->rx_jumbo, tp->rx_jumbo_mapping);
3440 tp->rx_jumbo = NULL;
3443 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3444 tp->rx_rcb, tp->rx_rcb_mapping);
3448 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3449 tp->tx_ring, tp->tx_desc_mapping);
3452 if (tp->hw_status) {
3453 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3454 tp->hw_status, tp->status_mapping);
3455 tp->hw_status = NULL;
3458 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3459 tp->hw_stats, tp->stats_mapping);
3460 tp->hw_stats = NULL;
3465 * Must not be invoked with interrupt sources disabled and
3466 * the hardware shut down. Can sleep.
/* Allocate all host memory the driver needs: one kmalloc holding the
 * std-RX, jumbo-RX and TX ring_info arrays back-to-back, plus DMA-
 * consistent buffers for the four descriptor rings, the status block
 * and the statistics block.  On any failure, tg3_free_consistent is
 * called to unwind (error-path lines elided in this view).
 */
3468 static int tg3_alloc_consistent(struct tg3 *tp)
3470 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3472 TG3_RX_JUMBO_RING_SIZE)) +
3473 (sizeof(struct tx_ring_info) *
3476 if (!tp->rx_std_buffers)
3479 memset(tp->rx_std_buffers, 0,
3480 (sizeof(struct ring_info) *
3482 TG3_RX_JUMBO_RING_SIZE)) +
3483 (sizeof(struct tx_ring_info) *
/* Carve the jumbo and TX info arrays out of the same allocation. */
3486 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3487 tp->tx_buffers = (struct tx_ring_info *)
3488 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3490 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3491 &tp->rx_std_mapping);
3495 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3496 &tp->rx_jumbo_mapping);
3501 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3502 &tp->rx_rcb_mapping);
3506 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3507 &tp->tx_desc_mapping);
3511 tp->hw_status = pci_alloc_consistent(tp->pdev,
3513 &tp->status_mapping);
3517 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3518 sizeof(struct tg3_hw_stats),
3519 &tp->stats_mapping);
3523 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3524 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3529 tg3_free_consistent(tp);
3533 #define MAX_WAIT_CNT 1000
3535 /* To stop a block, clear the enable bit and poll till it
3536 * clears. tp->lock is held.
/* Disable one chip functional block: clear enable_bit in the register at
 * 'ofs' and poll (up to MAX_WAIT_CNT iterations) until the hardware
 * reports the bit clear.  On 5705/5750 some blocks cannot be toggled, so
 * those are treated as immediate success.  Returns nonzero on timeout
 * (elided lines — confirm the exact return value against full source).
 */
3538 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3551 /* We can't enable/disable these bits of the
3552 * 5705/5750, just say success.
3565 for (i = 0; i < MAX_WAIT_CNT; i++) {
3568 if ((val & enable_bit) == 0)
3572 if (i == MAX_WAIT_CNT) {
3573 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3574 "ofs=%lx enable_bit=%x\n",
3582 /* tp->lock is held. */
/* Quiesce the whole chip: disable interrupts, stop the RX engine, then
 * shut down each receive and send functional block in dependency order
 * via tg3_stop_block, drain MAC TX mode, reset the FTQs, stop the buffer
 * manager / memory arbiter, and finally clear the status and statistics
 * blocks.  Accumulates per-block errors in 'err'.  tp->lock held.
 */
3583 static int tg3_abort_hw(struct tg3 *tp)
3587 tg3_disable_ints(tp);
3589 tp->rx_mode &= ~RX_MODE_ENABLE;
3590 tw32_f(MAC_RX_MODE, tp->rx_mode);
3593 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3594 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3595 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3596 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3597 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3598 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3600 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3601 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3602 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3603 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3604 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3605 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3606 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3610 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3611 tw32_f(MAC_MODE, tp->mac_mode);
3614 tp->tx_mode &= ~TX_MODE_ENABLE;
3615 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll until the MAC TX engine acknowledges the disable. */
3617 for (i = 0; i < MAX_WAIT_CNT; i++) {
3619 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3622 if (i >= MAX_WAIT_CNT) {
3623 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3624 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3625 tp->dev->name, tr32(MAC_TX_MODE));
3629 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3630 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3631 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
/* Pulse the flow-through-queue reset. */
3633 tw32(FTQ_RESET, 0xffffffff);
3634 tw32(FTQ_RESET, 0x00000000);
3636 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3637 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3642 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3644 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3650 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration lock: request GNT1 via the
 * SWARB register and poll up to 8000 times for the grant.  No-op on
 * chips without NVRAM support.  tp->lock held.
 */
3651 static int tg3_nvram_lock(struct tg3 *tp)
3653 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3656 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3657 for (i = 0; i < 8000; i++) {
3658 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3668 /* tp->lock is held. */
/* Release the NVRAM software arbitration lock taken by tg3_nvram_lock.
 * tp->lock held.
 */
3669 static void tg3_nvram_unlock(struct tg3 *tp)
3671 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3672 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3675 /* tp->lock is held. */
/* Before a chip reset: write the firmware-mailbox magic, and when the
 * new ASF handshake is in use, post the driver-state word matching the
 * reset kind (init/shutdown/suspend) so the ASF firmware knows why the
 * chip is going down.  tp->lock held.
 */
3676 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3678 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3679 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3681 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3683 case RESET_KIND_INIT:
3684 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3688 case RESET_KIND_SHUTDOWN:
3689 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3693 case RESET_KIND_SUSPEND:
3694 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3704 /* tp->lock is held. */
/* After a chip reset completes: under the new ASF handshake, post the
 * matching "done" driver-state word for the reset kind.  tp->lock held.
 */
3705 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3707 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3709 case RESET_KIND_INIT:
3710 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3711 DRV_STATE_START_DONE);
3714 case RESET_KIND_SHUTDOWN:
3715 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3716 DRV_STATE_UNLOAD_DONE);
3725 /* tp->lock is held. */
/* Legacy ASF signalling (pre-new-handshake firmware): post the driver
 * state word for the reset kind whenever ASF is enabled.  tp->lock held.
 */
3726 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3728 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3730 case RESET_KIND_INIT:
3731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3735 case RESET_KIND_SHUTDOWN:
3736 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3740 case RESET_KIND_SUSPEND:
3741 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3751 static void tg3_stop_fw(struct tg3 *);
3753 /* tp->lock is held. */
/* Perform a full core-clock chip reset and bring the device back to a
 * usable register state: issue GRC_MISC_CFG_CORECLK_RESET (with per-chip
 * quirks for PCI Express and 5705/5750 GPHY power), restore PCI config
 * space, re-enable indirect accesses and the memory arbiter, wait for
 * on-chip firmware to post its completion magic, and re-probe the ASF
 * enable state from NVRAM-shadow SRAM.  tp->lock held.
 */
3754 static int tg3_chip_reset(struct tg3 *tp)
3760 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3764 * We must avoid the readl() that normally takes place.
3765 * It locks machines, causes machine checks, and other
3766 * fun things. So, temporarily disable the 5701
3767 * hardware workaround, while we do the reset.
3769 flags_save = tp->tg3_flags;
3770 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3773 val = GRC_MISC_CFG_CORECLK_RESET;
/* PCI Express needs extra handling around the core reset. */
3775 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3776 if (tr32(0x7e2c) == 0x60) {
3779 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3780 tw32(GRC_MISC_CFG, (1 << 29));
/* Keep the GPHY powered across the reset on 5705/5750. */
3785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3787 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3788 tw32(GRC_MISC_CFG, val);
3790 /* restore 5701 hardware bug workaround flag */
3791 tp->tg3_flags = flags_save;
3793 /* Unfortunately, we have to delay before the PCI read back.
3794 * Some 575X chips even will not respond to a PCI cfg access
3795 * when the reset command is given to the chip.
3797 * How do these hardware designers expect things to work
3798 * properly if the PCI write is posted for a long period
3799 * of time? It is always necessary to have some method by
3800 * which a register read back can occur to push the write
3801 * out which does the reset.
3803 * For most tg3 variants the trick below was working.
3808 /* Flush PCI posted writes. The normal MMIO registers
3809 * are inaccessible at this time so this is the only
3810 * way to make this reliably (actually, this is no longer
3811 * the case, see above). I tried to use indirect
3812 * register read/write but this upset some 5701 variants.
3814 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* PCIe 5750 pre-A1: wait out link retraining and clear error status. */
3818 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3819 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3823 /* Wait for link training to complete. */
3824 for (i = 0; i < 5000; i++)
3827 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3828 pci_write_config_dword(tp->pdev, 0xc4,
3829 cfg_val | (1 << 15));
3831 /* Set PCIE max payload size and clear error status. */
3832 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3835 /* Re-enable indirect register accesses. */
3836 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3837 tp->misc_host_ctrl);
3839 /* Set MAX PCI retry to zero. */
3840 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3841 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3842 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3843 val |= PCISTATE_RETRY_SAME_DMA;
3844 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3846 pci_restore_state(tp->pdev);
3848 /* Make sure PCI-X relaxed ordering bit is clear. */
3849 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3850 val &= ~PCIX_CAPS_RELAXED_ORDERING;
3851 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3853 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3855 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3857 tw32(0x5000, 0x400);
3860 tw32(GRC_MODE, tp->grc_mode);
3862 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3863 u32 val = tr32(0xc4);
3865 tw32(0xc4, val | (1 << 15));
3868 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3870 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3871 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3872 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3873 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3876 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3877 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3878 tw32_f(MAC_MODE, tp->mac_mode);
3880 tw32_f(MAC_MODE, 0);
3883 /* Wait for firmware initialization to complete. */
3884 for (i = 0; i < 100000; i++) {
3885 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3886 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Sun 570X boards have no on-chip firmware, so no timeout complaint. */
3891 !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3892 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3893 "firmware will not restart magic=%08x\n",
3894 tp->dev->name, val);
3898 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3899 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3900 u32 val = tr32(0x7c00);
3902 tw32(0x7c00, val | (1 << 25));
3905 /* Reprobe ASF enable state. */
3906 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3907 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3908 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3909 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3912 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3913 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3914 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3916 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3923 /* tp->lock is held. */
/* When ASF is enabled, ask the on-chip firmware to pause: post the
 * PAUSE_FW command in the firmware command mailbox, raise the RX-CPU
 * event, and poll briefly for the firmware's ACK (event bit 14 clears).
 * tp->lock held.
 */
3924 static void tg3_stop_fw(struct tg3 *tp)
3926 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3930 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3931 val = tr32(GRC_RX_CPU_EVENT);
3933 tw32(GRC_RX_CPU_EVENT, val);
3935 /* Wait for RX cpu to ACK the event. */
3936 for (i = 0; i < 100; i++) {
3937 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3944 /* tp->lock is held. */
/* Bring the chip fully down: signal the shutdown to firmware (pre-reset
 * signature), reset the chip, then post the legacy and post-reset
 * shutdown signatures.  tp->lock held.
 */
3945 static int tg3_halt(struct tg3 *tp)
3951 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN)
3954 err = tg3_chip_reset(tp);
3956 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3957 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Memory layout of the embedded RX-CPU firmware image (tg3FwText below):
 * load address and section addresses/lengths in chip SRAM.
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a historical typo;
 * keep the spelling as-is since other code may reference this name.
 */
3965 #define TG3_FW_RELEASE_MAJOR 0x0
3966 #define TG3_FW_RELASE_MINOR 0x0
3967 #define TG3_FW_RELEASE_FIX 0x0
3968 #define TG3_FW_START_ADDR 0x08000000
3969 #define TG3_FW_TEXT_ADDR 0x08000000
3970 #define TG3_FW_TEXT_LEN 0x9c0
3971 #define TG3_FW_RODATA_ADDR 0x080009c0
3972 #define TG3_FW_RODATA_LEN 0x60
3973 #define TG3_FW_DATA_ADDR 0x08000a40
3974 #define TG3_FW_DATA_LEN 0x20
3975 #define TG3_FW_SBSS_ADDR 0x08000a60
3976 #define TG3_FW_SBSS_LEN 0xc
3977 #define TG3_FW_BSS_ADDR 0x08000a70
3978 #define TG3_FW_BSS_LEN 0x10
3980 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3981 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3982 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3983 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3984 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3985 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3986 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3987 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3988 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3989 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3990 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3991 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3992 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3993 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3994 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3995 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3996 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3997 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3998 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3999 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4000 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4001 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4002 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4003 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4004 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4005 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4007 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4008 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4009 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4010 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4011 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4012 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4013 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4014 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4015 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4016 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4017 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4018 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4019 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4020 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4021 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4022 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4023 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4024 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4025 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4026 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4027 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4028 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4029 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4030 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4031 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4032 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4033 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4034 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4035 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4036 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4037 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4038 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4039 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4040 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4041 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4042 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4043 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4044 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4045 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4046 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4047 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4048 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4049 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4050 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4051 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4052 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4053 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4054 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4055 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4056 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4057 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4058 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4059 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4060 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4061 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4062 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4063 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4064 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4065 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4066 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4067 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4068 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4069 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4070 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4071 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4074 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4075 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4076 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4077 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4078 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4082 #if 0 /* All zeros, don't eat up space with it. */
4083 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4084 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4085 0x00000000, 0x00000000, 0x00000000, 0x00000000
4089 #define RX_CPU_SCRATCH_BASE 0x30000
4090 #define RX_CPU_SCRATCH_SIZE 0x04000
4091 #define TX_CPU_SCRATCH_BASE 0x34000
4092 #define TX_CPU_SCRATCH_SIZE 0x04000
4094 /* tp->lock is held. */
/*
 * Halt one of the on-chip MIPS CPUs (RX or TX) by repeatedly writing the
 * HALT bit into its CPU_MODE register and polling until the bit reads back
 * set (up to 10000 attempts).  CPU_STATE is cleared to 0xffffffff before
 * each attempt.  On timeout a KERN_ERR message is printed.
 *
 * NOTE(review): this dump is an extraction artifact — several short lines
 * (opening/closing braces, `int i;`, the 5705 TX-CPU guard body, `break;`,
 * the post-loop udelay, and the return statements) are missing below.
 * Compare against the pristine tg3.c before editing.
 */
4095 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* Halting the TX CPU is not supported on 5705-class chips (guard body elided). */
4099 if (offset == TX_CPU_BASE &&
4100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
/* RX CPU: poll-halt loop, then a final forced (flushed) halt write. */
4103 if (offset == RX_CPU_BASE) {
4104 for (i = 0; i < 10000; i++) {
4105 tw32(offset + CPU_STATE, 0xffffffff);
4106 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4107 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final write uses tw32_f so the halt is flushed to the device. */
4111 tw32(offset + CPU_STATE, 0xffffffff);
4112 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* Non-RX (TX) CPU path: same poll-halt loop, no forced final write. */
4115 for (i = 0; i < 10000; i++) {
4116 tw32(offset + CPU_STATE, 0xffffffff);
4117 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4118 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Timed out waiting for the HALT bit — report which CPU failed. */
4124 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4127 (offset == RX_CPU_BASE ? "RX" : "TX"));
/*
 * Members of struct fw_info (the `struct fw_info {` header line and the
 * `u32 *text_data; / *rodata_data; / *data_data;` pointer members were
 * elided by the extraction — see their use in tg3_load_firmware_cpu and
 * tg3_load_5701_a0_firmware_fix).
 *
 * Each section is described by its firmware load address and its length
 * in bytes; only the low 16 bits of each base are used as an offset into
 * the CPU scratch area when loading.
 */
4134 unsigned int text_base;
4135 unsigned int text_len;
4137 unsigned int rodata_base;
4138 unsigned int rodata_len;
4140 unsigned int data_base;
4141 unsigned int data_len;
4145 /* tp->lock is held. */
/*
 * Load a firmware image (text/rodata/data sections described by *info)
 * into the scratch memory of one of the on-chip CPUs.
 *
 * Steps visible in this dump:
 *   1. Refuse to load TX-CPU firmware on 5705-class chips.
 *   2. Pick the write routine: tg3_write_mem on 5705, otherwise
 *      tg3_write_indirect_reg32.
 *   3. Temporarily set TG3_FLAG_PCIX_TARGET_HWBUG to force PCI config
 *      space for indirect register accesses; restored at the end.
 *   4. Halt the target CPU (tg3_halt_cpu), zero its scratch area,
 *      assert CPU_MODE_HALT, then copy each section word by word.
 *      Sections with a NULL data pointer are written as zeros.
 *
 * NOTE(review): extraction artifact — braces, `int err, i;`, the error
 * returns after the halt call, parts of the per-word address expressions
 * (`i * sizeof(u32)` terms) and the final return are missing below.
 */
4146 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4147 int cpu_scratch_size, struct fw_info *info)
/* Remember the flags so the HWBUG override below can be undone. */
4150 u32 orig_tg3_flags = tp->tg3_flags;
4151 void (*write_op)(struct tg3 *, u32, u32);
/* TX-CPU firmware is never loaded on 5705 — hard error. */
4153 if (cpu_base == TX_CPU_BASE &&
4154 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4155 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4156 "TX cpu firmware on %s which is 5705.\n",
/* Chip-dependent choice of scratch-memory write primitive. */
4161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4162 write_op = tg3_write_mem;
4164 write_op = tg3_write_indirect_reg32;
4166 /* Force use of PCI config space for indirect register
4169 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
/* The CPU must be halted before its scratch memory is rewritten. */
4171 err = tg3_halt_cpu(tp, cpu_base);
/* Clear the whole scratch area, then keep the CPU halted while loading. */
4175 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4176 write_op(tp, cpu_scratch_base + i, 0);
4177 tw32(cpu_base + CPU_STATE, 0xffffffff);
4178 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* .text section: scratch base + low 16 bits of text_base + word offset. */
4179 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4180 write_op(tp, (cpu_scratch_base +
4181 (info->text_base & 0xffff) +
4184 info->text_data[i] : 0));
/* .rodata section, same addressing scheme; NULL pointer -> zeros. */
4185 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4186 write_op(tp, (cpu_scratch_base +
4187 (info->rodata_base & 0xffff) +
4189 (info->rodata_data ?
4190 info->rodata_data[i] : 0));
/* .data section, same addressing scheme; NULL pointer -> zeros. */
4191 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4192 write_op(tp, (cpu_scratch_base +
4193 (info->data_base & 0xffff) +
4196 info->data_data[i] : 0));
/* Undo the TG3_FLAG_PCIX_TARGET_HWBUG override from above. */
4201 tp->tg3_flags = orig_tg3_flags;
4205 /* tp->lock is held. */
/*
 * Workaround for 5701 rev A0: load the tg3Fw* firmware image into both
 * the RX and TX CPU scratch areas, then start only the RX CPU.
 *
 * After loading, the RX CPU's program counter is pointed at
 * TG3_FW_TEXT_ADDR; up to 5 attempts are made (re-halting and rewriting
 * the PC each time) to get CPU_PC to read back the expected address,
 * after which the HALT bit is cleared to let the CPU run.
 *
 * NOTE(review): extraction artifact — braces, `int err, i;`, the
 * `if (err) return err;` checks after each load, a udelay between PC
 * polls, and the return statements are missing below.
 */
4206 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4208 struct fw_info info;
/* Describe the built-in tg3Fw image; the data section is all zeros
 * (tg3FwData is compiled out), hence the NULL pointer. */
4211 info.text_base = TG3_FW_TEXT_ADDR;
4212 info.text_len = TG3_FW_TEXT_LEN;
4213 info.text_data = &tg3FwText[0];
4214 info.rodata_base = TG3_FW_RODATA_ADDR;
4215 info.rodata_len = TG3_FW_RODATA_LEN;
4216 info.rodata_data = &tg3FwRodata[0];
4217 info.data_base = TG3_FW_DATA_ADDR;
4218 info.data_len = TG3_FW_DATA_LEN;
4219 info.data_data = NULL;
/* Load the same image into both CPUs' scratch memory. */
4221 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4222 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4227 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4228 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4233 /* Now startup only the RX cpu. */
4234 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4235 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* Retry until CPU_PC sticks at the firmware entry point (max 5 tries). */
4237 for (i = 0; i < 5; i++) {
4238 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4240 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4241 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4242 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* PC never latched the entry point — report actual vs. expected. */
4246 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4247 "to set RX CPU PC, is %08x should be %08x\n",
4248 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Clear HALT (write CPU_MODE = 0) to start the RX CPU running. */
4252 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4253 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4258 #if TG3_TSO_SUPPORT != 0
4260 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4261 #define TG3_TSO_FW_RELASE_MINOR 0x6
4262 #define TG3_TSO_FW_RELEASE_FIX 0x0
4263 #define TG3_TSO_FW_START_ADDR 0x08000000
4264 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4265 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4266 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4267 #define TG3_TSO_FW_RODATA_LEN 0x60
4268 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4269 #define TG3_TSO_FW_DATA_LEN 0x30
4270 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4271 #define TG3_TSO_FW_SBSS_LEN 0x2c
4272 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4273 #define TG3_TSO_FW_BSS_LEN 0x894
4275 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4276 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4277 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4278 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4279 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4280 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4281 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4282 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4283 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4284 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4285 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4286 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4287 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4288 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4289 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4290 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4291 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4292 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4293 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4294 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4295 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4296 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4297 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4298 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4299 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4300 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4301 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4302 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4303 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4304 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4305 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4306 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4307 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4308 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4309 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4310 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4311 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4312 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4313 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4314 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4315 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4316 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4317 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4318 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4319 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4320 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4321 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4322 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4323 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4324 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4325 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4326 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4327 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4328 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4329 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4330 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4331 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4332 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4333 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4334 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4335 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4336 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4337 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4338 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4339 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4340 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4341 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4342 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4343 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4344 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4345 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4346 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4347 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4348 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4349 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4350 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4351 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4352 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4353 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4354 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4355 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4356 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4357 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4358 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4359 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4360 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4361 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4362 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4363 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4364 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4365 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4366 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4367 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4368 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4369 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4370 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4371 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4372 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4373 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4374 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4375 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4376 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4377 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4378 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4379 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4380 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4381 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4382 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4383 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4384 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4385 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4386 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4387 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4388 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4389 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4390 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4391 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4392 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4393 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4394 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4395 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4396 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4397 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4398 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4399 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4400 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4401 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4402 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4403 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4404 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4405 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4406 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4407 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4408 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4409 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4410 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4411 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4412 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4413 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4414 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4415 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4416 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4417 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4418 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4419 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4420 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4421 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4422 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4423 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4424 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4425 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4426 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4427 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4428 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4429 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4430 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4431 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4432 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4433 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4434 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4435 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4436 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4437 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4438 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4439 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4440 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4441 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4442 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4443 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4444 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4445 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4446 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4447 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4448 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4449 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4450 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4451 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4452 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4453 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4454 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4455 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4456 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4457 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4458 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4459 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4460 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4461 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4462 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4463 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4464 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4465 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4466 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4467 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4468 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4469 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4470 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4471 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4472 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4473 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4474 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4475 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4476 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4477 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4478 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4479 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4480 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4481 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4482 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4483 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4484 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4485 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4486 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4487 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4488 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4489 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4490 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4491 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4492 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4493 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4494 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4495 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4496 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4497 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4498 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4499 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4500 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4501 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4502 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4503 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4504 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4505 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4506 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4507 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4508 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4509 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4510 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4511 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4512 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4513 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4514 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4515 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4516 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4517 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4518 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4519 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4520 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4521 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4522 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4523 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4524 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4525 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4526 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4527 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4528 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4529 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4530 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4531 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4532 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4533 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4534 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4535 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4536 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4537 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4538 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4539 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4540 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4541 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4542 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4543 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4544 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4545 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4546 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4547 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4548 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4549 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4550 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4551 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4552 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4553 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4554 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4555 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4556 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4557 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4558 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4559 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
/* Read-only data segment of the TSO offload firmware image. The words
 * are ASCII text used by the firmware itself (e.g. 0x4d61696e 0x43707542
 * spells "MainCpuB"); tg3_load_tso_firmware() copies this verbatim into
 * NIC SRAM at TG3_TSO_FW_RODATA_ADDR. Generated data — do not edit. */
4562 static u32 tg3TsoFwRodata[] = {
4563 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4564 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4565 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4566 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
/* Initialized data segment of the TSO offload firmware; the ASCII words
 * spell the firmware version string "stkoffld_v1.6.0".
 * tg3_load_tso_firmware() copies this into NIC SRAM at
 * TG3_TSO_FW_DATA_ADDR. Generated data — do not edit. */
4570 static u32 tg3TsoFwData[] = {
4571 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4572 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4576 /* 5705 needs a special version of the TSO firmware. */
4577 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
4578 #define TG3_TSO5_FW_RELASE_MINOR 0x2
4579 #define TG3_TSO5_FW_RELEASE_FIX 0x0
4580 #define TG3_TSO5_FW_START_ADDR 0x00010000
4581 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
4582 #define TG3_TSO5_FW_TEXT_LEN 0xe90
4583 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
4584 #define TG3_TSO5_FW_RODATA_LEN 0x50
4585 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
4586 #define TG3_TSO5_FW_DATA_LEN 0x20
4587 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
4588 #define TG3_TSO5_FW_SBSS_LEN 0x28
4589 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
4590 #define TG3_TSO5_FW_BSS_LEN 0x88
/* Executable (.text) image of the 5705-specific TSO firmware, stored as
 * raw instruction words (the opcodes match the NIC's on-chip MIPS-style
 * CPU — e.g. 0x03e00008 == "jr ra"). tg3_load_tso_firmware() downloads
 * this to the RX CPU scratch area and starts it. Generated data — do not
 * hand-edit. */
4592 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4593 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4594 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4595 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4596 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4597 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4598 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4599 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4600 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4601 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4602 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4603 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4604 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4605 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4606 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4607 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4608 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4609 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4610 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4611 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4612 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4613 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4614 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4615 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4616 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4617 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4618 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4619 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4620 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4621 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4622 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4623 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4624 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4625 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4626 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4627 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4628 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4629 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4630 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4631 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4632 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4633 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4634 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4635 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4636 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4637 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4638 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4639 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4640 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4641 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4642 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4643 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4644 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4645 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4646 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4647 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4648 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4649 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4650 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4651 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4652 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4653 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4654 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4655 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4656 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4657 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4658 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4659 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4660 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4661 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4662 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4663 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4664 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4665 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4666 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4667 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4668 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4669 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4670 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4671 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4672 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4673 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4674 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4675 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4676 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4677 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4678 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4679 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4680 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4681 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4682 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4683 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4684 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4685 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4686 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4687 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4688 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4689 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4690 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4691 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4692 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4693 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4694 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4695 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4696 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4697 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4698 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4699 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4700 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4701 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4702 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4703 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4704 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4705 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4706 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4707 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4708 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4709 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4710 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4711 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4712 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4713 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4714 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4715 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4716 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4717 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4718 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4719 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4720 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4721 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4722 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4723 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4724 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4725 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4726 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4727 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4728 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4729 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4730 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4731 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4732 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4733 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4734 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4735 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4736 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4737 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4738 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4739 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4740 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4741 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4742 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4743 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4744 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4745 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4746 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4747 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4748 0x00000000, 0x00000000, 0x00000000,
/* Read-only (string) data for the 5705 TSO firmware; the ASCII words
 * spell "MainCpuB", "MainCpuA", "stkoffld" and "fatalErr". Loaded into
 * NIC SRAM at TG3_TSO5_FW_RODATA_ADDR. Generated data — do not edit. */
4751 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4752 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4753 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4754 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4755 0x00000000, 0x00000000, 0x00000000,
/* Initialized data for the 5705 TSO firmware; spells the version string
 * "stkoffld_v1.2.0" (matching TG3_TSO5_FW_RELEASE_MAJOR/MINOR above).
 * Loaded into NIC SRAM at TG3_TSO5_FW_DATA_ADDR. Do not edit. */
4758 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4759 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4760 0x00000000, 0x00000000, 0x00000000,
4763 /* tp->lock is held. */
/*
 * tg3_load_tso_firmware() - download and start the TSO offload firmware
 * on one of the NIC's embedded CPUs.
 *
 * Caller must hold tp->lock (see comment above). Selects the
 * 5705-specific image (tg3Tso5Fw*) or the generic image (tg3TsoFw*),
 * describes its text/rodata/data segments in a struct fw_info, hands it
 * to tg3_load_firmware_cpu(), then releases the CPU from halt and
 * verifies the program counter landed on the firmware entry point.
 *
 * NOTE(review): several original source lines are elided in this excerpt
 * (early returns, error checks, udelay()s, closing braces); the comments
 * below annotate only what is visible.
 */
4764 static int tg3_load_tso_firmware(struct tg3 *tp)
4766 struct fw_info info;
4767 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
/* 5750 needs no firmware download at all — presumably it handles TSO
 * natively; the early return sits on an elided line. TODO confirm. */
4770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
/* 5705 variant: special firmware image, run on the RX CPU, using the
 * mbuf-pool SRAM region as the CPU scratch area. */
4774 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4775 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4776 info.text_data = &tg3Tso5FwText[0];
4777 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4778 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4779 info.rodata_data = &tg3Tso5FwRodata[0];
4780 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4781 info.data_len = TG3_TSO5_FW_DATA_LEN;
4782 info.data_data = &tg3Tso5FwData[0];
4783 cpu_base = RX_CPU_BASE;
4784 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
/* Scratch size = sum of all firmware segment lengths (the rodata/data
 * terms sit on elided lines 4786-4787) plus sbss + bss. */
4785 cpu_scratch_size = (info.text_len +
4788 TG3_TSO5_FW_SBSS_LEN +
4789 TG3_TSO5_FW_BSS_LEN);
/* All other TSO-capable chips: generic image, run on the TX CPU out of
 * its dedicated scratch area. */
4791 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4792 info.text_len = TG3_TSO_FW_TEXT_LEN;
4793 info.text_data = &tg3TsoFwText[0];
4794 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4795 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4796 info.rodata_data = &tg3TsoFwRodata[0];
4797 info.data_base = TG3_TSO_FW_DATA_ADDR;
4798 info.data_len = TG3_TSO_FW_DATA_LEN;
4799 info.data_data = &tg3TsoFwData[0];
4800 cpu_base = TX_CPU_BASE;
4801 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4802 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
/* Copy all segments into the chosen CPU's scratch SRAM (trailing &info
 * argument and the error check are on elided lines). */
4805 err = tg3_load_firmware_cpu(tp, cpu_base,
4806 cpu_scratch_base, cpu_scratch_size,
4811 /* Now startup the cpu. */
4812 tw32(cpu_base + CPU_STATE, 0xffffffff);
4813 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Up to 5 attempts: check the PC took, else re-halt the CPU, clear
 * state, and rewrite the PC before retrying. */
4815 for (i = 0; i < 5; i++) {
4816 if (tr32(cpu_base + CPU_PC) == info.text_base)
4818 tw32(cpu_base + CPU_STATE, 0xffffffff);
4819 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
4820 tw32_f(cpu_base + CPU_PC, info.text_base);
/* PC never stuck at the entry point: report actual vs. expected
 * (the final info.text_base argument and return sit on elided lines). */
4824 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
4825 "to set CPU PC, is %08x should be %08x\n",
4826 tp->dev->name, tr32(cpu_base + CPU_PC),
/* Writing 0 to CPU_MODE drops CPU_MODE_HALT, letting the firmware run. */
4830 tw32(cpu_base + CPU_STATE, 0xffffffff);
4831 tw32_f(cpu_base + CPU_MODE, 0x00000000);
4835 #endif /* TG3_TSO_SUPPORT != 0 */
4837 /* tp->lock is held. */
/*
 * __tg3_set_mac_addr() - program the device's MAC address registers from
 * tp->dev->dev_addr. Caller must hold tp->lock (see comment above).
 *
 * The 6-byte address is split into a 16-bit high half (bytes 0-1) and a
 * 32-bit low half (bytes 2-5) as the MAC_ADDR registers expect.
 *
 * NOTE(review): a few original lines (e.g. the `int i;` declaration and
 * closing braces) are elided in this excerpt.
 */
4838 static void __tg3_set_mac_addr(struct tg3 *tp)
4840 u32 addr_high, addr_low;
4843 addr_high = ((tp->dev->dev_addr[0] << 8) |
4844 tp->dev->dev_addr[1]);
4845 addr_low = ((tp->dev->dev_addr[2] << 24) |
4846 (tp->dev->dev_addr[3] << 16) |
4847 (tp->dev->dev_addr[4] << 8) |
4848 (tp->dev->dev_addr[5] << 0));
/* Mirror the same address into all four MAC_ADDR_{0..3} slots
 * (each slot is an 8-byte-spaced high/low register pair). */
4849 for (i = 0; i < 4; i++) {
4850 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4851 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* Chips other than 5700/5701/5705 also have 12 extended perfect-match
 * address slots; fill those with the same address too. */
4854 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4855 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4856 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4857 for (i = 0; i < 12; i++) {
4858 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4859 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the TX backoff randomizer from the byte-sum of the MAC address
 * (addr_high is reused here as a plain scratch variable). */
4863 addr_high = (tp->dev->dev_addr[0] +
4864 tp->dev->dev_addr[1] +
4865 tp->dev->dev_addr[2] +
4866 tp->dev->dev_addr[3] +
4867 tp->dev->dev_addr[4] +
4868 tp->dev->dev_addr[5]) &
4869 TX_BACKOFF_SEED_MASK;
4870 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/*
 * tg3_set_mac_addr() - net_device set-MAC-address handler.
 * @dev: network device being reconfigured
 * @p:   struct sockaddr carrying the new hardware address
 *
 * Copies the new address into dev->dev_addr, then programs it into the
 * hardware under tp->lock (taken with interrupts disabled, as required
 * by __tg3_set_mac_addr's "tp->lock is held" contract).
 *
 * NOTE(review): the trailing `return 0;` and braces are on elided lines.
 */
4873 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4875 struct tg3 *tp = netdev_priv(dev);
4876 struct sockaddr *addr = p;
4878 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4880 spin_lock_irq(&tp->lock);
4881 __tg3_set_mac_addr(tp);
4882 spin_unlock_irq(&tp->lock);
4887 /* tp->lock is held. */
/*
 * tg3_set_bdinfo() - fill in one TG3_BDINFO ring-control block in NIC
 * SRAM. Caller must hold tp->lock (see comment above).
 *
 * Writes the 64-bit host DMA address of the ring (split into HIGH/LOW
 * 32-bit words), the maxlen/flags word, and — on chips other than 5705 —
 * the ring's location inside NIC SRAM.
 *
 * NOTE(review): the tg3_write_mem(tp, ...) call heads and the final
 * nic_addr parameter/argument are on elided lines in this excerpt; only
 * the argument lines are visible below.
 */
4888 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4889 dma_addr_t mapping, u32 maxlen_flags,
4893 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4894 ((u64) mapping >> 32));
4896 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4897 ((u64) mapping & 0xffffffff));
4899 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705 has no NIC-side descriptor copies, so skip the NIC_ADDR word. */
4902 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4904 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4908 static void __tg3_set_rx_mode(struct net_device *);
4910 /* tp->lock is held. */
4911 static int tg3_reset_hw(struct tg3 *tp)
4913 u32 val, rdmac_mode;
4916 tg3_disable_ints(tp);
4920 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4922 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4923 err = tg3_abort_hw(tp);
4928 err = tg3_chip_reset(tp);
4932 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4934 /* This works around an issue with Athlon chipsets on
4935 * B3 tigon3 silicon. This bit has no effect on any
4936 * other revision. But do not set this on PCI Express
4939 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4940 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4941 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4943 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4944 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4945 val = tr32(TG3PCI_PCISTATE);
4946 val |= PCISTATE_RETRY_SAME_DMA;
4947 tw32(TG3PCI_PCISTATE, val);
4950 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4951 /* Enable some hw fixes. */
4952 val = tr32(TG3PCI_MSI_DATA);
4953 val |= (1 << 26) | (1 << 28) | (1 << 29);
4954 tw32(TG3PCI_MSI_DATA, val);
4957 /* Descriptor ring init may make accesses to the
4958 * NIC SRAM area to setup the TX descriptors, so we
4959 * can only do this after the hardware has been
4960 * successfully reset.
4964 /* This value is determined during the probe time DMA
4965 * engine test, tg3_test_dma.
4967 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4969 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4970 GRC_MODE_4X_NIC_SEND_RINGS |
4971 GRC_MODE_NO_TX_PHDR_CSUM |
4972 GRC_MODE_NO_RX_PHDR_CSUM);
4973 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4974 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4975 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4976 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4977 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4981 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4983 /* Setup the timer prescalar register. Clock is always 66Mhz. */
4984 val = tr32(GRC_MISC_CFG);
4986 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4987 tw32(GRC_MISC_CFG, val);
4989 /* Initialize MBUF/DESC pool. */
4990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4992 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4993 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4995 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4997 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4998 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4999 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5001 #if TG3_TSO_SUPPORT != 0
5002 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5005 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5006 TG3_TSO5_FW_RODATA_LEN +
5007 TG3_TSO5_FW_DATA_LEN +
5008 TG3_TSO5_FW_SBSS_LEN +
5009 TG3_TSO5_FW_BSS_LEN);
5010 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5011 tw32(BUFMGR_MB_POOL_ADDR,
5012 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5013 tw32(BUFMGR_MB_POOL_SIZE,
5014 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5018 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5019 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5020 tp->bufmgr_config.mbuf_read_dma_low_water);
5021 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5022 tp->bufmgr_config.mbuf_mac_rx_low_water);
5023 tw32(BUFMGR_MB_HIGH_WATER,
5024 tp->bufmgr_config.mbuf_high_water);
5026 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5027 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5028 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5029 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5030 tw32(BUFMGR_MB_HIGH_WATER,
5031 tp->bufmgr_config.mbuf_high_water_jumbo);
5033 tw32(BUFMGR_DMA_LOW_WATER,
5034 tp->bufmgr_config.dma_low_water);
5035 tw32(BUFMGR_DMA_HIGH_WATER,
5036 tp->bufmgr_config.dma_high_water);
5038 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5039 for (i = 0; i < 2000; i++) {
5040 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5045 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5050 /* Setup replenish threshold. */
5051 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5053 /* Initialize TG3_BDINFO's at:
5054 * RCVDBDI_STD_BD: standard eth size rx ring
5055 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5056 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5059 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5060 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5061 * ring attribute flags
5062 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5064 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5065 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5067 * The size of each ring is fixed in the firmware, but the location is
5070 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5071 ((u64) tp->rx_std_mapping >> 32));
5072 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5073 ((u64) tp->rx_std_mapping & 0xffffffff));
5074 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5075 NIC_SRAM_RX_BUFFER_DESC);
5077 /* Don't even try to program the JUMBO/MINI buffer descriptor
5080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5082 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5083 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5085 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5086 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5088 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5089 BDINFO_FLAGS_DISABLED);
5091 /* Setup replenish threshold. */
5092 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5094 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5095 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5096 ((u64) tp->rx_jumbo_mapping >> 32));
5097 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5098 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5099 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5100 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5101 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5102 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5104 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5105 BDINFO_FLAGS_DISABLED);
5110 /* There is only one send ring on 5705/5750, no need to explicitly
5111 * disable the others.
5113 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5114 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5115 /* Clear out send RCB ring in SRAM. */
5116 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5117 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5118 BDINFO_FLAGS_DISABLED);
5123 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5124 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5126 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5127 tp->tx_desc_mapping,
5128 (TG3_TX_RING_SIZE <<
5129 BDINFO_FLAGS_MAXLEN_SHIFT),
5130 NIC_SRAM_TX_BUFFER_DESC);
5132 /* There is only one receive return ring on 5705/5750, no need
5133 * to explicitly disable the others.
5135 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5136 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5137 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5138 i += TG3_BDINFO_SIZE) {
5139 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5140 BDINFO_FLAGS_DISABLED);
5145 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5147 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5149 (TG3_RX_RCB_RING_SIZE(tp) <<
5150 BDINFO_FLAGS_MAXLEN_SHIFT),
5153 tp->rx_std_ptr = tp->rx_pending;
5154 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5157 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5158 tp->rx_jumbo_pending : 0;
5159 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5162 /* Initialize MAC address and backoff seed. */
5163 __tg3_set_mac_addr(tp);
5165 /* MTU + ethernet header + FCS + optional VLAN tag */
5166 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5168 /* The slot time is changed by tg3_setup_phy if we
5169 * run at gigabit with half duplex.
5171 tw32(MAC_TX_LENGTHS,
5172 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5173 (6 << TX_LENGTHS_IPG_SHIFT) |
5174 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5176 /* Receive rules. */
5177 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5178 tw32(RCVLPC_CONFIG, 0x0181);
5180 /* Calculate RDMAC_MODE setting early, we need it to determine
5181 * the RCVLPC_STATE_ENABLE mask.
5183 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5184 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5185 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5186 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5187 RDMAC_MODE_LNGREAD_ENAB);
5188 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5189 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5190 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5191 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5192 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5193 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5194 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5195 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5196 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5197 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5198 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5199 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5203 #if TG3_TSO_SUPPORT != 0
5204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5205 rdmac_mode |= (1 << 27);
5208 /* Receive/send statistics. */
5209 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5210 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5211 val = tr32(RCVLPC_STATS_ENABLE);
5212 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5213 tw32(RCVLPC_STATS_ENABLE, val);
5215 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5217 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5218 tw32(SNDDATAI_STATSENAB, 0xffffff);
5219 tw32(SNDDATAI_STATSCTRL,
5220 (SNDDATAI_SCTRL_ENABLE |
5221 SNDDATAI_SCTRL_FASTUPD));
5223 /* Setup host coalescing engine. */
5224 tw32(HOSTCC_MODE, 0);
5225 for (i = 0; i < 2000; i++) {
5226 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5231 tw32(HOSTCC_RXCOL_TICKS, 0);
5232 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5233 tw32(HOSTCC_RXMAX_FRAMES, 1);
5234 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5235 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5236 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5237 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5238 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5240 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5241 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5243 /* set status block DMA address */
5244 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5245 ((u64) tp->status_mapping >> 32));
5246 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5247 ((u64) tp->status_mapping & 0xffffffff));
5249 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5250 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5251 /* Status/statistics block address. See tg3_timer,
5252 * the tg3_periodic_fetch_stats call there, and
5253 * tg3_get_stats to see how this works for 5705/5750 chips.
5255 tw32(HOSTCC_STAT_COAL_TICKS,
5256 DEFAULT_STAT_COAL_TICKS);
5257 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5258 ((u64) tp->stats_mapping >> 32));
5259 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5260 ((u64) tp->stats_mapping & 0xffffffff));
5261 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5262 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5265 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5267 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5268 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5269 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5270 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5271 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5273 /* Clear statistics/status block in chip, and status block in ram. */
5274 for (i = NIC_SRAM_STATS_BLK;
5275 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5277 tg3_write_mem(tp, i, 0);
5280 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5282 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5283 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5284 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5287 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5289 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5290 GRC_LCLCTRL_GPIO_OUTPUT1);
5291 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5294 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5295 tr32(MAILBOX_INTERRUPT_0);
5297 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5298 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5299 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5303 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5304 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5305 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5306 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5307 WDMAC_MODE_LNGREAD_ENAB);
5309 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5310 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5312 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5313 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5314 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5316 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5317 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5318 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5319 val |= WDMAC_MODE_RX_ACCEL;
5323 tw32_f(WDMAC_MODE, val);
5326 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5327 val = tr32(TG3PCI_X_CAPS);
5328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5329 val &= ~PCIX_CAPS_BURST_MASK;
5330 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5331 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5332 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5333 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5334 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5335 val |= (tp->split_mode_max_reqs <<
5336 PCIX_CAPS_SPLIT_SHIFT);
5338 tw32(TG3PCI_X_CAPS, val);
5341 tw32_f(RDMAC_MODE, rdmac_mode);
5344 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5345 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5346 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5347 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5348 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5349 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5350 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5351 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5352 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5353 #if TG3_TSO_SUPPORT != 0
5354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5355 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5357 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5358 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5360 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5361 err = tg3_load_5701_a0_firmware_fix(tp);
5366 #if TG3_TSO_SUPPORT != 0
5367 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5368 err = tg3_load_tso_firmware(tp);
5374 tp->tx_mode = TX_MODE_ENABLE;
5375 tw32_f(MAC_TX_MODE, tp->tx_mode);
5378 tp->rx_mode = RX_MODE_ENABLE;
5379 tw32_f(MAC_RX_MODE, tp->rx_mode);
5382 if (tp->link_config.phy_is_low_power) {
5383 tp->link_config.phy_is_low_power = 0;
5384 tp->link_config.speed = tp->link_config.orig_speed;
5385 tp->link_config.duplex = tp->link_config.orig_duplex;
5386 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5389 tp->mi_mode = MAC_MI_MODE_BASE;
5390 tw32_f(MAC_MI_MODE, tp->mi_mode);
5393 tw32(MAC_LED_CTRL, tp->led_ctrl);
5395 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5396 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5397 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5400 tw32_f(MAC_RX_MODE, tp->rx_mode);
5403 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5405 /* Set drive transmission level to 1.2V */
5406 val = tr32(MAC_SERDES_CFG);
5409 tw32(MAC_SERDES_CFG, val);
5411 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5412 tw32(MAC_SERDES_CFG, 0x616000);
5415 /* Prevent chip from dropping frames when flow control
5418 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5421 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5422 /* Use hardware link auto-negotiation */
5423 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5426 err = tg3_setup_phy(tp, 1);
5430 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5433 /* Clear CRC stats. */
5434 tg3_readphy(tp, 0x1e, &tmp);
5435 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5436 tg3_readphy(tp, 0x14, &tmp);
5439 __tg3_set_rx_mode(tp->dev);
5441 /* Initialize receive rules. */
5442 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5443 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5444 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5445 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5448 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5452 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5456 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5458 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5460 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5462 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5464 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5466 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5468 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5470 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5472 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5474 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5476 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5478 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5480 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5482 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5490 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5492 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5493 tg3_enable_ints(tp);
5498 /* Called at device open time to get the chip ready for
5499 * packet processing. Invoked with tp->lock held.
5501 static int tg3_init_hw(struct tg3 *tp)
5505 /* Force the chip into D0. */
5506 err = tg3_set_power_state(tp, 0);
/* NOTE(review): the error check on 'err' after the power-state call
 * appears to be elided from this excerpt. */
5510 tg3_switch_clocks(tp);
/* Reset the SRAM memory-window base before reprogramming the chip. */
5512 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5514 err = tg3_reset_hw(tp);
/* NOTE(review): the trailing 'return err;' and closing brace are
 * elided from this excerpt. */
/* TG3_STAT_ADD32(): read a 32-bit hardware counter register and fold it
 * into a 64-bit software counter kept as a high/low pair, bumping .high
 * when the running .low sum wraps around.  NOTE(review): the closing
 * "} while (0)" line of this macro is elided from this excerpt. */
5520 #define TG3_STAT_ADD32(PSTAT, REG) \
5521 do { u32 __val = tr32(REG); \
5522 (PSTAT)->low += __val; \
5523 if ((PSTAT)->low < __val) \
5524 (PSTAT)->high += 1; \
/* Fold the MAC's 32-bit TX/RX statistics counters into the 64-bit
 * software mirrors in tp->hw_stats via TG3_STAT_ADD32().  Invoked from
 * the driver timer for 5705/5750 chips (see tg3_timer); does nothing
 * while the link is down.  NOTE(review): the opening brace and the
 * early 'return;' under the carrier check are elided from this
 * excerpt. */
5527 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5529 struct tg3_hw_stats *sp = tp->hw_stats;
5531 if (!netif_carrier_ok(tp->dev))
5534 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5535 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5536 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5537 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5538 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5539 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5540 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5541 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5542 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5543 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5544 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5545 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5546 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5548 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5549 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5550 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5551 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5552 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5553 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5554 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5555 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5556 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5557 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5558 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5559 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5560 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5561 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Driver heartbeat timer, re-armed every tp->timer_offset jiffies.
 * Under tp->lock + tp->tx_lock it:
 *  (1) works around the race-prone non-tagged status-block protocol by
 *      forcing an interrupt (GRC_LCLCTRL_SETINT) / immediate coalescing
 *      (HOSTCC_MODE_NOW) when SD_STATUS_UPDATED is set;
 *  (2) detects a write-DMA engine that has lost its enable bit and
 *      schedules reset_task to recover;
 *  (3) fetches statistics for 5705/5750;
 *  (4) once per second (timer_counter) polls link state for chips that
 *      need link-change-register or serdes polling;
 *  (5) every asf_counter ticks sends the ASF firmware heartbeat.
 * NOTE(review): many lines (locals, brace structure, the early return
 * after schedule_work) are elided from this excerpt. */
5564 static void tg3_timer(unsigned long __opaque)
5566 struct tg3 *tp = (struct tg3 *) __opaque;
5567 unsigned long flags;
5569 spin_lock_irqsave(&tp->lock, flags);
5570 spin_lock(&tp->tx_lock);
5572 /* All of this garbage is because when using non-tagged
5573 * IRQ status the mailbox/status_block protocol the chip
5574 * uses with the cpu is race prone.
5576 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5577 tw32(GRC_LOCAL_CTRL,
5578 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5580 tw32(HOSTCC_MODE, tp->coalesce_mode |
5581 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write-DMA engine lost its enable bit: flag a restart and hand the
 * heavy lifting to the reset workqueue (locks dropped first). */
5584 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5585 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5586 spin_unlock(&tp->tx_lock);
5587 spin_unlock_irqrestore(&tp->lock, flags);
5588 schedule_work(&tp->reset_task);
5592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5594 tg3_periodic_fetch_stats(tp);
5596 /* This part only runs once per second. */
5597 if (!--tp->timer_counter) {
5598 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5602 mac_stat = tr32(MAC_STATUS);
5605 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5606 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5608 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5612 tg3_setup_phy(tp, 0);
5613 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5614 u32 mac_stat = tr32(MAC_STATUS);
5617 if (netif_carrier_ok(tp->dev) &&
5618 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5621 if (! netif_carrier_ok(tp->dev) &&
5622 (mac_stat & (MAC_STATUS_PCS_SYNCED |
5623 MAC_STATUS_SIGNAL_DET))) {
5629 ~MAC_MODE_PORT_MODE_MASK));
5631 tw32_f(MAC_MODE, tp->mac_mode);
5633 tg3_setup_phy(tp, 0);
5637 tp->timer_counter = tp->timer_multiplier;
5640 /* Heartbeat is only sent once every 120 seconds. */
5641 if (!--tp->asf_counter) {
5642 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
/* Tell the ASF firmware the driver is still alive via the shared
 * SRAM mailbox, then kick the RX CPU event register. */
5645 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5646 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5647 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5648 val = tr32(GRC_RX_CPU_EVENT);
5650 tw32(GRC_RX_CPU_EVENT, val);
5652 tp->asf_counter = tp->asf_multiplier;
5655 spin_unlock(&tp->tx_lock);
5656 spin_unlock_irqrestore(&tp->lock, flags);
/* Re-arm ourselves for the next tick. */
5658 tp->timer.expires = jiffies + tp->timer_offset;
5659 add_timer(&tp->timer);
/* net_device open hook: quiesce interrupts, allocate the DMA-consistent
 * rings, hook the shared IRQ, program the hardware via tg3_init_hw(),
 * start the 100 ms heartbeat timer (1 s link poll, 120 s ASF
 * heartbeat), then enable interrupts and the TX queue.
 * NOTE(review): the error-handling labels/branches between the visible
 * statements are elided from this excerpt. */
5662 static int tg3_open(struct net_device *dev)
5664 struct tg3 *tp = netdev_priv(dev);
5667 spin_lock_irq(&tp->lock);
5668 spin_lock(&tp->tx_lock);
5670 tg3_disable_ints(tp);
5671 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5673 spin_unlock(&tp->tx_lock);
5674 spin_unlock_irq(&tp->lock);
5676 /* The placement of this call is tied
5677 * to the setup and use of Host TX descriptors.
5679 err = tg3_alloc_consistent(tp);
5683 err = request_irq(dev->irq, tg3_interrupt,
5684 SA_SHIRQ, dev->name, dev);
/* IRQ hookup failed: release the rings allocated above. */
5687 tg3_free_consistent(tp);
5691 spin_lock_irq(&tp->lock);
5692 spin_lock(&tp->tx_lock);
5694 err = tg3_init_hw(tp);
/* Timer cadence: tick every HZ/10 (100 ms); timer_counter of 10 gives
 * the once-per-second work, asf counter of 10*120 gives 120 s. */
5699 tp->timer_offset = HZ / 10;
5700 tp->timer_counter = tp->timer_multiplier = 10;
5701 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5703 init_timer(&tp->timer);
5704 tp->timer.expires = jiffies + tp->timer_offset;
5705 tp->timer.data = (unsigned long) tp;
5706 tp->timer.function = tg3_timer;
5707 add_timer(&tp->timer);
5709 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5712 spin_unlock(&tp->tx_lock);
5713 spin_unlock_irq(&tp->lock);
/* NOTE(review): this free_irq/free_consistent pair is the hw-init
 * failure path; its label is elided from this excerpt. */
5716 free_irq(dev->irq, dev);
5717 tg3_free_consistent(tp);
5721 spin_lock_irq(&tp->lock);
5722 spin_lock(&tp->tx_lock);
5724 tg3_enable_ints(tp);
5726 spin_unlock(&tp->tx_lock);
5727 spin_unlock_irq(&tp->lock);
5729 netif_start_queue(dev);
/* Debug-only state dump: prints the PCI status word, the mode+status
 * registers of every MAC/DMA/host-coalescing functional block, the
 * NIC-side ring control blocks and status block read out of SRAM, the
 * host (SW) status and statistics blocks, and the first few NIC-side
 * TX / RX-std / RX-jumbo buffer descriptors read through the register
 * window.  'static' is deliberately commented out, presumably so the
 * function stays callable from outside this file while debugging.
 * NOTE(review): several declarations/blank separators are elided from
 * this excerpt. */
5735 /*static*/ void tg3_dump_state(struct tg3 *tp)
5737 u32 val32, val32_2, val32_3, val32_4, val32_5;
5741 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5742 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5743 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5747 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5748 tr32(MAC_MODE), tr32(MAC_STATUS));
5749 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5750 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5751 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5752 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5753 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5754 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5756 /* Send data initiator control block */
5757 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5758 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5759 printk(" SNDDATAI_STATSCTRL[%08x]\n",
5760 tr32(SNDDATAI_STATSCTRL));
5762 /* Send data completion control block */
5763 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5765 /* Send BD ring selector block */
5766 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5767 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5769 /* Send BD initiator control block */
5770 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5771 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5773 /* Send BD completion control block */
5774 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5776 /* Receive list placement control block */
5777 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5778 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5779 printk(" RCVLPC_STATSCTRL[%08x]\n",
5780 tr32(RCVLPC_STATSCTRL));
5782 /* Receive data and receive BD initiator control block */
5783 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5784 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5786 /* Receive data completion control block */
5787 printk("DEBUG: RCVDCC_MODE[%08x]\n",
5790 /* Receive BD initiator control block */
5791 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5792 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5794 /* Receive BD completion control block */
5795 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5796 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5798 /* Receive list selector control block */
5799 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5800 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5802 /* Mbuf cluster free block */
5803 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5804 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5806 /* Host coalescing control block */
5807 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5808 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5809 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5810 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5811 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5812 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5813 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5814 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5815 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5816 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5817 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5818 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5820 /* Memory arbiter control block */
5821 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5822 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5824 /* Buffer manager control block */
5825 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5826 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5827 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5828 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5829 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5830 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5831 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5832 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5834 /* Read DMA control block */
5835 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5836 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5838 /* Write DMA control block */
5839 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5840 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5842 /* DMA completion block */
5843 printk("DEBUG: DMAC_MODE[%08x]\n",
5847 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5848 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5849 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5850 tr32(GRC_LOCAL_CTRL));
/* Ring control blocks (RCBs) read straight from the chip. */
5853 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5854 tr32(RCVDBDI_JUMBO_BD + 0x0),
5855 tr32(RCVDBDI_JUMBO_BD + 0x4),
5856 tr32(RCVDBDI_JUMBO_BD + 0x8),
5857 tr32(RCVDBDI_JUMBO_BD + 0xc));
5858 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5859 tr32(RCVDBDI_STD_BD + 0x0),
5860 tr32(RCVDBDI_STD_BD + 0x4),
5861 tr32(RCVDBDI_STD_BD + 0x8),
5862 tr32(RCVDBDI_STD_BD + 0xc));
5863 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5864 tr32(RCVDBDI_MINI_BD + 0x0),
5865 tr32(RCVDBDI_MINI_BD + 0x4),
5866 tr32(RCVDBDI_MINI_BD + 0x8),
5867 tr32(RCVDBDI_MINI_BD + 0xc));
5869 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5870 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5871 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5872 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5873 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5874 val32, val32_2, val32_3, val32_4);
5876 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5877 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5878 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5879 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5880 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5881 val32, val32_2, val32_3, val32_4);
5883 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5884 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5885 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5886 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5887 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5888 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5889 val32, val32_2, val32_3, val32_4, val32_5);
5891 /* SW status block */
5892 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5893 tp->hw_status->status,
5894 tp->hw_status->status_tag,
5895 tp->hw_status->rx_jumbo_consumer,
5896 tp->hw_status->rx_consumer,
5897 tp->hw_status->rx_mini_consumer,
5898 tp->hw_status->idx[0].rx_producer,
5899 tp->hw_status->idx[0].tx_consumer);
5901 /* SW statistics block */
5902 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5903 ((u32 *)tp->hw_stats)[0],
5904 ((u32 *)tp->hw_stats)[1],
5905 ((u32 *)tp->hw_stats)[2],
5906 ((u32 *)tp->hw_stats)[3]);
/* Send producer mailboxes (host-side and NIC-side). */
5909 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5910 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5911 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5912 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5913 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5915 /* NIC side send descriptors. */
5916 for (i = 0; i < 6; i++) {
5919 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5920 + (i * sizeof(struct tg3_tx_buffer_desc));
5921 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5923 readl(txd + 0x0), readl(txd + 0x4),
5924 readl(txd + 0x8), readl(txd + 0xc));
5927 /* NIC side RX descriptors. */
5928 for (i = 0; i < 6; i++) {
5931 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5932 + (i * sizeof(struct tg3_rx_buffer_desc));
5933 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5935 readl(rxd + 0x0), readl(rxd + 0x4),
5936 readl(rxd + 0x8), readl(rxd + 0xc));
5937 rxd += (4 * sizeof(u32));
5938 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5940 readl(rxd + 0x0), readl(rxd + 0x4),
5941 readl(rxd + 0x8), readl(rxd + 0xc));
5944 for (i = 0; i < 6; i++) {
5947 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5948 + (i * sizeof(struct tg3_rx_buffer_desc));
5949 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5951 readl(rxd + 0x0), readl(rxd + 0x4),
5952 readl(rxd + 0x8), readl(rxd + 0xc));
5953 rxd += (4 * sizeof(u32));
5954 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5956 readl(rxd + 0x0), readl(rxd + 0x4),
5957 readl(rxd + 0x8), readl(rxd + 0xc));
/* Forward declarations: tg3_close() snapshots both stats blocks below. */
5962 static struct net_device_stats *tg3_get_stats(struct net_device *);
5963 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop hook: stop the TX queue, kill the heartbeat timer,
 * halt the chip and disable interrupts under both driver locks, free
 * the IRQ, snapshot the cumulative statistics into *_prev (so counters
 * survive the ring free/realloc across close/open), then release the
 * DMA-consistent rings.  NOTE(review): the chip-halt calls and the
 * final 'return 0;' are elided from this excerpt. */
5965 static int tg3_close(struct net_device *dev)
5967 struct tg3 *tp = netdev_priv(dev);
5969 netif_stop_queue(dev);
5971 del_timer_sync(&tp->timer);
5973 spin_lock_irq(&tp->lock);
5974 spin_lock(&tp->tx_lock);
5979 tg3_disable_ints(tp);
5984 ~(TG3_FLAG_INIT_COMPLETE |
5985 TG3_FLAG_GOT_SERDES_FLOWCTL);
5986 netif_carrier_off(tp->dev);
5988 spin_unlock(&tp->tx_lock);
5989 spin_unlock_irq(&tp->lock);
5991 free_irq(dev->irq, dev);
5993 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5994 sizeof(tp->net_stats_prev));
5995 memcpy(&tp->estats_prev, tg3_get_estats(tp),
5996 sizeof(tp->estats_prev));
5998 tg3_free_consistent(tp);
/* Collapse a high/low 64-bit hardware counter into an unsigned long.
 * NOTE(review): the BITS_PER_LONG==32 branch and the return statement
 * are elided from this excerpt; the visible expression builds the full
 * 64-bit value for the 64-bit-long case. */
6003 static inline unsigned long get_stat64(tg3_stat64_t *val)
6007 #if (BITS_PER_LONG == 32)
6010 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper (non-serdes) PHY the count lives in PHY register 0x14, exposed
 * by setting bit 15 of PHY register 0x1e, and is accumulated in
 * tp->phy_crc_errors under tp->lock; all other chips report the MAC's
 * rx_fcs_errors statistic instead. */
6015 static unsigned long calc_crc_errors(struct tg3 *tp)
6017 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6019 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6020 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6022 unsigned long flags;
6025 spin_lock_irqsave(&tp->lock, flags);
6026 tg3_readphy(tp, 0x1e, &val);
6027 tg3_writephy(tp, 0x1e, val | 0x8000);
6028 tg3_readphy(tp, 0x14, &val);
6029 spin_unlock_irqrestore(&tp->lock, flags);
6031 tp->phy_crc_errors += val;
6033 return tp->phy_crc_errors;
6036 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD(member): ethtool stat = pre-close snapshot (old_estats)
 * plus the live 64-bit hardware counter for the same member. */
6039 #define ESTAT_ADD(member) \
6040 estats->member = old_estats->member + \
6041 get_stat64(&hw_stats->member)
/* Fill tp->estats for ethtool: every field is the snapshot taken at the
 * last close (tp->estats_prev) plus the current hardware counter, so
 * values keep growing across close/open cycles.  NOTE(review): the
 * opening brace and the trailing 'return estats;' (and any hw_stats
 * NULL guard) are elided from this excerpt. */
6043 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6045 struct tg3_ethtool_stats *estats = &tp->estats;
6046 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6047 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6052 ESTAT_ADD(rx_octets);
6053 ESTAT_ADD(rx_fragments);
6054 ESTAT_ADD(rx_ucast_packets);
6055 ESTAT_ADD(rx_mcast_packets);
6056 ESTAT_ADD(rx_bcast_packets);
6057 ESTAT_ADD(rx_fcs_errors);
6058 ESTAT_ADD(rx_align_errors);
6059 ESTAT_ADD(rx_xon_pause_rcvd);
6060 ESTAT_ADD(rx_xoff_pause_rcvd);
6061 ESTAT_ADD(rx_mac_ctrl_rcvd);
6062 ESTAT_ADD(rx_xoff_entered);
6063 ESTAT_ADD(rx_frame_too_long_errors);
6064 ESTAT_ADD(rx_jabbers);
6065 ESTAT_ADD(rx_undersize_packets);
6066 ESTAT_ADD(rx_in_length_errors);
6067 ESTAT_ADD(rx_out_length_errors);
6068 ESTAT_ADD(rx_64_or_less_octet_packets);
6069 ESTAT_ADD(rx_65_to_127_octet_packets);
6070 ESTAT_ADD(rx_128_to_255_octet_packets);
6071 ESTAT_ADD(rx_256_to_511_octet_packets);
6072 ESTAT_ADD(rx_512_to_1023_octet_packets);
6073 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6074 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6075 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6076 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6077 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6079 ESTAT_ADD(tx_octets);
6080 ESTAT_ADD(tx_collisions);
6081 ESTAT_ADD(tx_xon_sent);
6082 ESTAT_ADD(tx_xoff_sent);
6083 ESTAT_ADD(tx_flow_control);
6084 ESTAT_ADD(tx_mac_errors);
6085 ESTAT_ADD(tx_single_collisions);
6086 ESTAT_ADD(tx_mult_collisions);
6087 ESTAT_ADD(tx_deferred);
6088 ESTAT_ADD(tx_excessive_collisions);
6089 ESTAT_ADD(tx_late_collisions);
6090 ESTAT_ADD(tx_collide_2times);
6091 ESTAT_ADD(tx_collide_3times);
6092 ESTAT_ADD(tx_collide_4times);
6093 ESTAT_ADD(tx_collide_5times);
6094 ESTAT_ADD(tx_collide_6times);
6095 ESTAT_ADD(tx_collide_7times);
6096 ESTAT_ADD(tx_collide_8times);
6097 ESTAT_ADD(tx_collide_9times);
6098 ESTAT_ADD(tx_collide_10times);
6099 ESTAT_ADD(tx_collide_11times);
6100 ESTAT_ADD(tx_collide_12times);
6101 ESTAT_ADD(tx_collide_13times);
6102 ESTAT_ADD(tx_collide_14times);
6103 ESTAT_ADD(tx_collide_15times);
6104 ESTAT_ADD(tx_ucast_packets);
6105 ESTAT_ADD(tx_mcast_packets);
6106 ESTAT_ADD(tx_bcast_packets);
6107 ESTAT_ADD(tx_carrier_sense_errors);
6108 ESTAT_ADD(tx_discards);
6109 ESTAT_ADD(tx_errors);
6111 ESTAT_ADD(dma_writeq_full);
6112 ESTAT_ADD(dma_write_prioq_full);
6113 ESTAT_ADD(rxbds_empty);
6114 ESTAT_ADD(rx_discards);
6115 ESTAT_ADD(rx_errors);
6116 ESTAT_ADD(rx_threshold_hit);
6118 ESTAT_ADD(dma_readq_full);
6119 ESTAT_ADD(dma_read_prioq_full);
6120 ESTAT_ADD(tx_comp_queue_full);
6122 ESTAT_ADD(ring_set_send_prod_index);
6123 ESTAT_ADD(ring_status_update);
6124 ESTAT_ADD(nic_irqs);
6125 ESTAT_ADD(nic_avoided_irqs);
6126 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device get_stats hook: map the chip's hardware statistics onto
 * the generic net_device_stats fields, each time adding the snapshot
 * saved at the last close (tp->net_stats_prev) so counters keep growing
 * across close/open cycles.  NOTE(review): the opening brace and the
 * trailing 'return stats;' are elided from this excerpt. */
6131 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6133 struct tg3 *tp = netdev_priv(dev);
6134 struct net_device_stats *stats = &tp->net_stats;
6135 struct net_device_stats *old_stats = &tp->net_stats_prev;
6136 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6141 stats->rx_packets = old_stats->rx_packets +
6142 get_stat64(&hw_stats->rx_ucast_packets) +
6143 get_stat64(&hw_stats->rx_mcast_packets) +
6144 get_stat64(&hw_stats->rx_bcast_packets);
6146 stats->tx_packets = old_stats->tx_packets +
6147 get_stat64(&hw_stats->tx_ucast_packets) +
6148 get_stat64(&hw_stats->tx_mcast_packets) +
6149 get_stat64(&hw_stats->tx_bcast_packets);
6151 stats->rx_bytes = old_stats->rx_bytes +
6152 get_stat64(&hw_stats->rx_octets);
6153 stats->tx_bytes = old_stats->tx_bytes +
6154 get_stat64(&hw_stats->tx_octets);
6156 stats->rx_errors = old_stats->rx_errors +
6157 get_stat64(&hw_stats->rx_errors) +
6158 get_stat64(&hw_stats->rx_discards);
6159 stats->tx_errors = old_stats->tx_errors +
6160 get_stat64(&hw_stats->tx_errors) +
6161 get_stat64(&hw_stats->tx_mac_errors) +
6162 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6163 get_stat64(&hw_stats->tx_discards);
6165 stats->multicast = old_stats->multicast +
6166 get_stat64(&hw_stats->rx_mcast_packets);
6167 stats->collisions = old_stats->collisions +
6168 get_stat64(&hw_stats->tx_collisions);
6170 stats->rx_length_errors = old_stats->rx_length_errors +
6171 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6172 get_stat64(&hw_stats->rx_undersize_packets);
6174 stats->rx_over_errors = old_stats->rx_over_errors +
6175 get_stat64(&hw_stats->rxbds_empty);
6176 stats->rx_frame_errors = old_stats->rx_frame_errors +
6177 get_stat64(&hw_stats->rx_align_errors);
6178 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6179 get_stat64(&hw_stats->tx_discards);
6180 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6181 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors come from the PHY on 5700/5701 copper, else from the MAC;
 * calc_crc_errors() hides that distinction. */
6183 stats->rx_crc_errors = old_stats->rx_crc_errors +
6184 calc_crc_errors(tp);
/* Compute a CRC over 'buf' bit by bit; used to hash multicast MAC
 * addresses for the MAC hash filter registers.  NOTE(review): the
 * accumulator, polynomial and return statement are elided from this
 * excerpt, so the exact CRC variant cannot be confirmed here. */
6189 static inline u32 calc_crc(unsigned char *buf, int len)
6197 for (j = 0; j < len; j++) {
6200 for (k = 0; k < 8; k++) {
6214 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6216 /* accept or reject all multicast frames */
6217 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6218 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6219 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6220 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Program MAC RX filtering from dev->flags and the multicast list:
 * promiscuous, accept-all-multicast, reject-all, or a CRC-hash filter
 * over the multicast addresses; also decides whether the MAC keeps
 * VLAN tags (always kept while ASF firmware is active, and when VLAN
 * support is compiled out).  Only writes MAC_RX_MODE when the computed
 * mode actually changed.  Callers hold tp->lock and tp->tx_lock (see
 * tg3_set_rx_mode).  NOTE(review): some brace/else lines are elided
 * from this excerpt. */
6223 static void __tg3_set_rx_mode(struct net_device *dev)
6225 struct tg3 *tp = netdev_priv(dev);
6228 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6229 RX_MODE_KEEP_VLAN_TAG);
6231 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6234 #if TG3_VLAN_TAG_USED
6236 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6237 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6239 /* By definition, VLAN is disabled always in this
6242 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6243 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6246 if (dev->flags & IFF_PROMISC) {
6247 /* Promiscuous mode. */
6248 rx_mode |= RX_MODE_PROMISC;
6249 } else if (dev->flags & IFF_ALLMULTI) {
6250 /* Accept all multicast. */
6251 tg3_set_multi (tp, 1);
6252 } else if (dev->mc_count < 1) {
6253 /* Reject all multicast. */
6254 tg3_set_multi (tp, 0);
6256 /* Accept one or more multicast(s). */
6257 struct dev_mc_list *mclist;
6259 u32 mc_filter[4] = { 0, };
6264 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6265 i++, mclist = mclist->next) {
/* Hash each address into one bit of the 128-bit filter; 'bit' is
 * derived from the CRC (its computation is elided in this excerpt). */
6267 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6269 regidx = (bit & 0x60) >> 5;
6271 mc_filter[regidx] |= (1 << bit);
6274 tw32(MAC_HASH_REG_0, mc_filter[0]);
6275 tw32(MAC_HASH_REG_1, mc_filter[1]);
6276 tw32(MAC_HASH_REG_2, mc_filter[2]);
6277 tw32(MAC_HASH_REG_3, mc_filter[3]);
6280 if (rx_mode != tp->rx_mode) {
6281 tp->rx_mode = rx_mode;
6282 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_rx_mode hook: take both driver locks around the real
 * worker, __tg3_set_rx_mode(). */
6287 static void tg3_set_rx_mode(struct net_device *dev)
6289 struct tg3 *tp = netdev_priv(dev);
6291 spin_lock_irq(&tp->lock);
6292 spin_lock(&tp->tx_lock);
6293 __tg3_set_rx_mode(dev);
6294 spin_unlock(&tp->tx_lock);
6295 spin_unlock_irq(&tp->lock);
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: size of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
/* ethtool get_regs hook: zero the 32 KB snapshot buffer, then copy each
 * register block into it at its native chip offset using the local
 * GET_REG32_* helper macros, under both driver locks.  NVRAM registers
 * are dumped only when the chip actually has NVRAM.  NOTE(review): the
 * locals (p, orig_p, i), the "} while (0)" macro terminators and some
 * #undef lines are elided from this excerpt. */
6305 static void tg3_get_regs(struct net_device *dev,
6306 struct ethtool_regs *regs, void *_p)
6309 struct tg3 *tp = netdev_priv(dev);
6315 memset(p, 0, TG3_REGDUMP_LEN);
6317 spin_lock_irq(&tp->lock);
6318 spin_lock(&tp->tx_lock);
6320 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
6321 #define GET_REG32_LOOP(base,len) \
6322 do { p = (u32 *)(orig_p + (base)); \
6323 for (i = 0; i < len; i += 4) \
6324 __GET_REG32((base) + i); \
6326 #define GET_REG32_1(reg) \
6327 do { p = (u32 *)(orig_p + (reg)); \
6328 __GET_REG32((reg)); \
6331 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6332 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6333 GET_REG32_LOOP(MAC_MODE, 0x4f0);
6334 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6335 GET_REG32_1(SNDDATAC_MODE);
6336 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6337 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6338 GET_REG32_1(SNDBDC_MODE);
6339 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6340 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6341 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6342 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6343 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6344 GET_REG32_1(RCVDCC_MODE);
6345 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6346 GET_REG32_LOOP(RCVCC_MODE, 0x14);
6347 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6348 GET_REG32_1(MBFREE_MODE);
6349 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6350 GET_REG32_LOOP(MEMARB_MODE, 0x10);
6351 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6352 GET_REG32_LOOP(RDMAC_MODE, 0x08);
6353 GET_REG32_LOOP(WDMAC_MODE, 0x08);
6354 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6355 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6356 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6357 GET_REG32_LOOP(FTQ_RESET, 0x120);
6358 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6359 GET_REG32_1(DMAC_MODE);
6360 GET_REG32_LOOP(GRC_MODE, 0x4c);
6361 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6362 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6365 #undef GET_REG32_LOOP
6368 spin_unlock(&tp->tx_lock);
6369 spin_unlock_irq(&tp->lock);
6372 static int tg3_get_eeprom_len(struct net_device *dev)
6374 return EEPROM_CHIP_SIZE;
/* Forward declaration: 32-bit NVRAM reader used throughout tg3_get_eeprom(). */
6377 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
6378 u32 offset, u32 *val);
/* ethtool get_eeprom hook: read eeprom->len bytes at eeprom->offset
 * from NVRAM 32 bits at a time, handling an unaligned head, the
 * 4-byte-aligned middle, and an unaligned tail separately; each piece
 * copied advances eeprom->len.  The magic word is read from offset 0
 * and byte-swapped for userspace.  NOTE(review): the locals, early
 * returns on read failure and loop-exit adjustments are elided from
 * this excerpt. */
6379 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6381 struct tg3 *tp = netdev_priv(dev);
6384 u32 i, offset, len, val, b_offset, b_count;
6386 offset = eeprom->offset;
6390 ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6393 eeprom->magic = swab32(eeprom->magic);
6396 /* adjustments to start on required 4 byte boundary */
6397 b_offset = offset & 3;
6398 b_count = 4 - b_offset;
6399 if (b_count > len) {
6400 /* i.e. offset=1 len=2 */
6403 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6406 memcpy(data, ((char*)&val) + b_offset, b_count);
6409 eeprom->len += b_count;
6412 /* read bytes upto the last 4 byte boundary */
6413 pd = &data[eeprom->len];
6414 for (i = 0; i < (len - (len & 3)); i += 4) {
6415 ret = tg3_nvram_read_using_eeprom(tp, offset + i,
6425 /* read last bytes not ending on 4 byte boundary */
6426 pd = &data[eeprom->len];
6428 b_offset = offset + len - b_count;
6429 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6432 memcpy(pd, ((char*)&val), b_count);
6433 eeprom->len += b_count;
/* ethtool get_settings hook: report the supported link modes (gigabit
 * unless the 10/100-only flag is set; TP modes for copper, FIBRE for
 * serdes) and the current advertising/speed/duplex/autoneg state.
 * Bails out while the interface is not fully initialized or the PHY is
 * in low-power mode (the error return value is elided from this
 * excerpt). */
6438 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6440 struct tg3 *tp = netdev_priv(dev);
6442 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6443 tp->link_config.phy_is_low_power)
6446 cmd->supported = (SUPPORTED_Autoneg);
6448 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6449 cmd->supported |= (SUPPORTED_1000baseT_Half |
6450 SUPPORTED_1000baseT_Full);
6452 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6453 cmd->supported |= (SUPPORTED_100baseT_Half |
6454 SUPPORTED_100baseT_Full |
6455 SUPPORTED_10baseT_Half |
6456 SUPPORTED_10baseT_Full |
6459 cmd->supported |= SUPPORTED_FIBRE;
6461 cmd->advertising = tp->link_config.advertising;
6462 cmd->speed = tp->link_config.active_speed;
6463 cmd->duplex = tp->link_config.active_duplex;
6465 cmd->phy_address = PHY_ADDR;
6466 cmd->transceiver = 0;
6467 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings hook: validate the request (serdes boards may
 * only autonegotiate the 1000baseT bits), store the new autoneg /
 * advertising / forced speed+duplex into tp->link_config under both
 * driver locks, and renegotiate via tg3_setup_phy().  Bails out while
 * the interface is not fully initialized or the PHY is in low-power
 * mode.  NOTE(review): the error returns and some brace lines are
 * elided from this excerpt. */
6473 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6475 struct tg3 *tp = netdev_priv(dev);
6477 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6478 tp->link_config.phy_is_low_power)
6481 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6482 /* These are the only valid advertisement bits allowed. */
6483 if (cmd->autoneg == AUTONEG_ENABLE &&
6484 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6485 ADVERTISED_1000baseT_Full |
6486 ADVERTISED_Autoneg |
6491 spin_lock_irq(&tp->lock);
6492 spin_lock(&tp->tx_lock);
6494 tp->link_config.autoneg = cmd->autoneg;
6495 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: take the advertised mask, let speed/duplex be resolved. */
6496 tp->link_config.advertising = cmd->advertising;
6497 tp->link_config.speed = SPEED_INVALID;
6498 tp->link_config.duplex = DUPLEX_INVALID;
/* Forced mode: no advertising, pin speed/duplex as requested. */
6500 tp->link_config.advertising = 0;
6501 tp->link_config.speed = cmd->speed;
6502 tp->link_config.duplex = cmd->duplex;
6505 tg3_setup_phy(tp, 1);
6506 spin_unlock(&tp->tx_lock);
6507 spin_unlock_irq(&tp->lock);
/* ethtool get_drvinfo hook: report driver name, version string and the
 * PCI bus id of the underlying device. */
6512 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6514 struct tg3 *tp = netdev_priv(dev);
6516 strcpy(info->driver, DRV_MODULE_NAME);
6517 strcpy(info->version, DRV_MODULE_VERSION);
6518 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol hook: the hardware supports magic-packet wakeup only;
 * report whether it is currently enabled.  No SecureOn password support.
 */
6521 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6523 struct tg3 *tp = netdev_priv(dev);
6525 wol->supported = WAKE_MAGIC;
6527 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6528 wol->wolopts = WAKE_MAGIC;
6529 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol hook: accept only WAKE_MAGIC, and reject it on serdes
 * parts that lack WoL capability.  Updates TG3_FLAG_WOL_ENABLE under
 * tp->lock.
 */
6532 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6534 struct tg3 *tp = netdev_priv(dev);
6536 if (wol->wolopts & ~WAKE_MAGIC)
/* Serdes PHYs can only wake if the board advertises SERDES_WOL_CAP. */
6538 if ((wol->wolopts & WAKE_MAGIC) &&
6539 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6540 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6543 spin_lock_irq(&tp->lock);
6544 if (wol->wolopts & WAKE_MAGIC)
6545 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6547 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6548 spin_unlock_irq(&tp->lock);
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
6553 static u32 tg3_get_msglevel(struct net_device *dev)
6555 struct tg3 *tp = netdev_priv(dev);
6556 return tp->msg_enable;
/* ethtool set_msglevel hook: store the new message-enable mask. */
6559 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6561 struct tg3 *tp = netdev_priv(dev);
6562 tp->msg_enable = value;
6565 #if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook (TSO builds only): reject the request on chips
 * that are not TSO-capable, otherwise defer to the generic helper.
 */
6566 static int tg3_set_tso(struct net_device *dev, u32 value)
6568 struct tg3 *tp = netdev_priv(dev);
6570 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6575 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset hook: restart autonegotiation by setting
 * BMCR_ANRESTART, but only if autoneg is currently enabled in BMCR.
 * BMCR is read twice; presumably to flush a latched value -- TODO confirm.
 */
6579 static int tg3_nway_reset(struct net_device *dev)
6581 struct tg3 *tp = netdev_priv(dev);
6585 spin_lock_irq(&tp->lock);
6586 tg3_readphy(tp, MII_BMCR, &bmcr);
6587 tg3_readphy(tp, MII_BMCR, &bmcr);
6589 if (bmcr & BMCR_ANENABLE) {
6590 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6593 spin_unlock_irq(&tp->lock);
/* ethtool get_ringparam hook: report hardware ring-size limits and the
 * currently configured RX/jumbo/TX pending counts.  There is no RX
 * mini ring on this hardware.
 */
6598 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6600 struct tg3 *tp = netdev_priv(dev);
6602 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6603 ering->rx_mini_max_pending = 0;
6604 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6606 ering->rx_pending = tp->rx_pending;
6607 ering->rx_mini_pending = 0;
6608 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6609 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam hook: bounds-check the requested ring sizes,
 * store them, and restart the interface.  Chips flagged MAX_RXPEND_64
 * are clamped to 63 RX descriptors.
 */
6612 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6614 struct tg3 *tp = netdev_priv(dev);
6616 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6617 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6618 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6622 spin_lock_irq(&tp->lock);
6623 spin_lock(&tp->tx_lock);
6625 tp->rx_pending = ering->rx_pending;
/* Some chips cannot post more than 64 RX descriptors. */
6627 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6628 tp->rx_pending > 63)
6629 tp->rx_pending = 63;
6630 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6631 tp->tx_pending = ering->tx_pending;
6635 tg3_netif_start(tp);
6636 spin_unlock(&tp->tx_lock);
6637 spin_unlock_irq(&tp->lock);
/* ethtool get_pauseparam hook: report flow-control autoneg and the
 * current RX/TX pause enables from the flags word.
 */
6642 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6644 struct tg3 *tp = netdev_priv(dev);
6646 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6647 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6648 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam hook: translate the three pause settings into
 * tg3_flags bits under the locks, then restart the interface so the
 * new flow-control configuration takes effect.
 */
6651 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6653 struct tg3 *tp = netdev_priv(dev);
6656 spin_lock_irq(&tp->lock);
6657 spin_lock(&tp->tx_lock);
6658 if (epause->autoneg)
6659 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6661 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6662 if (epause->rx_pause)
6663 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6665 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6666 if (epause->tx_pause)
6667 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6669 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6672 tg3_netif_start(tp);
6673 spin_unlock(&tp->tx_lock);
6674 spin_unlock_irq(&tp->lock);
/* ethtool get_rx_csum hook: report whether RX checksumming is enabled. */
6679 static u32 tg3_get_rx_csum(struct net_device *dev)
6681 struct tg3 *tp = netdev_priv(dev);
6682 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum hook: toggle RX checksum offload, except on chips
 * with broken checksum hardware (e.g. 5700 B0), where the request is
 * handled in the BROKEN_CHECKSUMS branch (body not visible here).
 */
6685 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6687 struct tg3 *tp = netdev_priv(dev);
6689 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6695 spin_lock_irq(&tp->lock);
6697 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6699 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6700 spin_unlock_irq(&tp->lock);
/* ethtool set_tx_csum hook: toggle NETIF_F_IP_CSUM in dev->features,
 * subject to the same broken-checksum-hardware check as the RX path.
 */
6705 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6707 struct tg3 *tp = netdev_priv(dev);
6709 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6716 dev->features |= NETIF_F_IP_CSUM;
6718 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool get_stats_count hook: number of u64 statistics exported. */
6723 static int tg3_get_stats_count (struct net_device *dev)
6725 return TG3_NUM_STATS;
6728 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6730 switch (stringset) {
6732 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
6735 WARN_ON(1); /* we need a WARN() */
/* ethtool get_ethtool_stats hook: copy the freshly gathered hardware
 * statistics (tg3_get_estats) into the caller's u64 array.
 */
6740 static void tg3_get_ethtool_stats (struct net_device *dev,
6741 struct ethtool_stats *estats, u64 *tmp_stats)
6743 struct tg3 *tp = netdev_priv(dev);
6744 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* Private ioctl handler implementing the standard MII ioctls: report
 * the PHY address, and read/write PHY registers under tp->lock.
 * Serdes parts have no MDIO-accessible PHY, so register access is
 * refused for them; writes additionally require CAP_NET_ADMIN.
 */
6747 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6749 struct mii_ioctl_data *data = if_mii(ifr);
6750 struct tg3 *tp = netdev_priv(dev);
6755 data->phy_id = PHY_ADDR;
6761 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6762 break; /* We have no PHY */
6764 spin_lock_irq(&tp->lock);
6765 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6766 spin_unlock_irq(&tp->lock);
6768 data->val_out = mii_regval;
6774 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6775 break; /* We have no PHY */
6777 if (!capable(CAP_NET_ADMIN))
6780 spin_lock_irq(&tp->lock);
6781 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6782 spin_unlock_irq(&tp->lock);
6793 #if TG3_VLAN_TAG_USED
/* VLAN group registration callback: record the new group and refresh
 * RX_MODE so the chip keeps/strips VLAN tags appropriately.
 */
6794 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6796 struct tg3 *tp = netdev_priv(dev);
6798 spin_lock_irq(&tp->lock);
6799 spin_lock(&tp->tx_lock);
6803 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6804 __tg3_set_rx_mode(dev);
6806 spin_unlock(&tp->tx_lock);
6807 spin_unlock_irq(&tp->lock);
/* VLAN id removal callback: drop the per-vid device pointer from the
 * registered vlan group, under both driver locks.
 */
6810 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6812 struct tg3 *tp = netdev_priv(dev);
6814 spin_lock_irq(&tp->lock);
6815 spin_lock(&tp->tx_lock);
6817 tp->vlgrp->vlan_devices[vid] = NULL;
6818 spin_unlock(&tp->tx_lock);
6819 spin_unlock_irq(&tp->lock);
/* ethtool operations table wiring the handlers above into the ethtool
 * core.  TSO entries are compiled in only when TG3_TSO_SUPPORT is set;
 * generic ethtool_op_* helpers are used where no chip-specific logic
 * is needed.
 */
6823 static struct ethtool_ops tg3_ethtool_ops = {
6824 .get_settings = tg3_get_settings,
6825 .set_settings = tg3_set_settings,
6826 .get_drvinfo = tg3_get_drvinfo,
6827 .get_regs_len = tg3_get_regs_len,
6828 .get_regs = tg3_get_regs,
6829 .get_wol = tg3_get_wol,
6830 .set_wol = tg3_set_wol,
6831 .get_msglevel = tg3_get_msglevel,
6832 .set_msglevel = tg3_set_msglevel,
6833 .nway_reset = tg3_nway_reset,
6834 .get_link = ethtool_op_get_link,
6835 .get_eeprom_len = tg3_get_eeprom_len,
6836 .get_eeprom = tg3_get_eeprom,
6837 .get_ringparam = tg3_get_ringparam,
6838 .set_ringparam = tg3_set_ringparam,
6839 .get_pauseparam = tg3_get_pauseparam,
6840 .set_pauseparam = tg3_set_pauseparam,
6841 .get_rx_csum = tg3_get_rx_csum,
6842 .set_rx_csum = tg3_set_rx_csum,
6843 .get_tx_csum = ethtool_op_get_tx_csum,
6844 .set_tx_csum = tg3_set_tx_csum,
6845 .get_sg = ethtool_op_get_sg,
6846 .set_sg = ethtool_op_set_sg,
6847 #if TG3_TSO_SUPPORT != 0
6848 .get_tso = ethtool_op_get_tso,
6849 .set_tso = tg3_set_tso,
6851 .get_strings = tg3_get_strings,
6852 .get_stats_count = tg3_get_stats_count,
6853 .get_ethtool_stats = tg3_get_ethtool_stats,
6856 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe-time NVRAM setup: reset the serial-EEPROM state machine,
 * enable seeprom access via GRC_LOCAL_CTRL, and on post-5701 chips
 * inspect NVRAM_CFG1 to decide between flash (TG3_FLAG_NVRAM, possibly
 * buffered) and legacy EEPROM access.  5750 additionally needs the
 * NVRAM_ACCESS enable bit toggled around the configuration read.
 * Sun 570X boards are skipped entirely (no usable NVRAM).
 */
6857 static void __devinit tg3_nvram_init(struct tg3 *tp)
6861 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
6864 tw32_f(GRC_EEPROM_ADDR,
6865 (EEPROM_ADDR_FSM_RESET |
6866 (EEPROM_DEFAULT_CLOCK_PERIOD <<
6867 EEPROM_ADDR_CLKPERD_SHIFT)));
6869 /* XXX schedule_timeout() ... */
6870 for (j = 0; j < 100; j++)
6873 /* Enable seeprom accesses. */
6874 tw32_f(GRC_LOCAL_CTRL,
6875 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6878 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6879 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6883 u32 nvaccess = tr32(NVRAM_ACCESS);
6885 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6888 nvcfg1 = tr32(NVRAM_CFG1);
6890 tp->tg3_flags |= TG3_FLAG_NVRAM;
6891 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6892 if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6893 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6895 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6896 tw32(NVRAM_CFG1, nvcfg1);
6899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6900 u32 nvaccess = tr32(NVRAM_ACCESS);
6902 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* 5700/5701: no NVRAM interface, clear the flags. */
6905 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
/* Read one 32-bit word from the legacy serial EEPROM via the
 * GRC_EEPROM_ADDR/DATA registers: program address + READ + START,
 * poll up to 10000 times for EEPROM_ADDR_COMPLETE, then fetch the
 * data word.  Fails if the offset exceeds the address mask or the
 * completion bit never sets.
 */
6909 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
6910 u32 offset, u32 *val)
6915 if (offset > EEPROM_ADDR_ADDR_MASK ||
6919 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6920 EEPROM_ADDR_DEVID_MASK |
6922 tw32(GRC_EEPROM_ADDR,
6924 (0 << EEPROM_ADDR_DEVID_SHIFT) |
6925 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6926 EEPROM_ADDR_ADDR_MASK) |
6927 EEPROM_ADDR_READ | EEPROM_ADDR_START);
6929 for (i = 0; i < 10000; i++) {
6930 tmp = tr32(GRC_EEPROM_ADDR);
6932 if (tmp & EEPROM_ADDR_COMPLETE)
6936 if (!(tmp & EEPROM_ADDR_COMPLETE))
6939 *val = tr32(GRC_EEPROM_DATA);
/* Read one 32-bit word from NVRAM.  Sun 570X boards are rejected with
 * an error message.  Chips without TG3_FLAG_NVRAM fall back to the
 * legacy EEPROM path.  Buffered flash needs the linear offset remapped
 * into page/offset form.  The actual read programs NVRAM_ADDR, issues
 * a RD/GO/FIRST/LAST/DONE command, polls for DONE, and byte-swaps the
 * result; on 5750 the NVRAM_ACCESS enable bit is toggled around the
 * operation.
 */
6943 static int __devinit tg3_nvram_read(struct tg3 *tp,
6944 u32 offset, u32 *val)
6948 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
6949 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
6953 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6954 return tg3_nvram_read_using_eeprom(tp, offset, val);
/* Buffered flash addresses as (page << POS) + intra-page offset. */
6956 if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6957 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6958 NVRAM_BUFFERED_PAGE_POS) +
6959 (offset % NVRAM_BUFFERED_PAGE_SIZE);
6961 if (offset > NVRAM_ADDR_MSK)
6966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6967 u32 nvaccess = tr32(NVRAM_ACCESS);
6969 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6972 tw32(NVRAM_ADDR, offset);
6974 NVRAM_CMD_RD | NVRAM_CMD_GO |
6975 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6977 /* Wait for done bit to clear. */
6978 for (i = 0; i < 1000; i++) {
6980 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6982 *val = swab32(tr32(NVRAM_RDDATA));
6987 tg3_nvram_unlock(tp);
6989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6990 u32 nvaccess = tr32(NVRAM_ACCESS);
6992 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Table entry mapping a PCI subsystem vendor/device pair to a PHY id.
 * NOTE(review): the phy_id member is not visible in this excerpt but is
 * referenced by the table initializers and lookup_by_subsys() below --
 * confirm against the full source.
 */
7001 struct subsys_tbl_ent {
7002 u16 subsys_vendor, subsys_devid;
/* Hard-coded subsystem-id -> PHY-id table, used by tg3_phy_probe() as
 * a fallback when neither the PHY registers nor the eeprom signature
 * yield a usable PHY id.  A phy_id of 0 marks serdes (fibre) boards.
 */
7006 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7007 /* Broadcom boards. */
7008 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7009 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7010 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7011 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
7012 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7013 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7014 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
7015 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7016 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7017 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7018 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7021 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7022 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7023 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
7024 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7025 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7028 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7029 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7030 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7031 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7033 /* Compaq boards. */
7034 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7035 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7036 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
7037 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7038 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7041 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Linear search of subsys_id_to_phy_id for this device's PCI subsystem
 * vendor/device pair; returns the matching entry or falls through (NULL
 * return not visible in this excerpt).
 */
7044 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7048 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7049 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7050 tp->pdev->subsystem_vendor) &&
7051 (subsys_id_to_phy_id[i].subsys_devid ==
7052 tp->pdev->subsystem_device))
7053 return &subsys_id_to_phy_id[i];
/* Probe-time PHY identification and initial link setup.
 *
 * Order of discovery:
 *  1. Read the NIC SRAM signature; if valid, extract the eeprom PHY id,
 *     serdes flag, LED mode, write-protect / ASF / WoL configuration.
 *  2. Read MII_PHYSID1/2 from the PHY itself (skipped when ASF firmware
 *     owns the PHY) and use that id if it is known-good.
 *  3. Otherwise fall back to the eeprom id, then to the hard-coded
 *     subsystem-id table.
 * For copper PHYs without ASF, ensure full 10/100(/1000) advertisement
 * and restart autoneg if needed; 5401 PHYs get DSP init.  Finally set
 * the default advertising mask (serdes and 10/100-only adjustments).
 */
7058 static int __devinit tg3_phy_probe(struct tg3 *tp)
7060 u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7061 u32 hw_phy_id, hw_phy_id_masked;
7063 int eeprom_signature_found, eeprom_phy_serdes, err;
7065 tp->phy_id = PHY_ID_INVALID;
7066 eeprom_phy_id = PHY_ID_INVALID;
7067 eeprom_phy_serdes = 0;
7068 eeprom_signature_found = 0;
7069 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7070 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7071 u32 nic_cfg, led_cfg;
7072 u32 nic_phy_id, cfg2;
7074 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7075 tp->nic_sram_data_cfg = nic_cfg;
7077 eeprom_signature_found = 1;
7079 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7080 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7081 eeprom_phy_serdes = 1;
7083 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7084 if (nic_phy_id != 0) {
7085 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7086 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
/* Repack the SRAM encoding into the driver's PHY id layout. */
7088 eeprom_phy_id = (id1 >> 16) << 10;
7089 eeprom_phy_id |= (id2 & 0xfc00) << 16;
7090 eeprom_phy_id |= (id2 & 0x03ff) << 0;
/* LED mode: 5750 keeps it in DATA_CFG_2, older chips in nic_cfg. */
7094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7095 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
7096 led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7097 SHASTA_EXT_LED_MODE_MASK);
7099 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7103 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7104 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7107 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7108 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7111 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7112 tp->led_ctrl = LED_CTRL_MODE_MAC;
7115 case SHASTA_EXT_LED_SHARED:
7116 tp->led_ctrl = LED_CTRL_MODE_SHARED;
7117 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7118 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7119 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7120 LED_CTRL_MODE_PHY_2);
7123 case SHASTA_EXT_LED_MAC:
7124 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7127 case SHASTA_EXT_LED_COMBO:
7128 tp->led_ctrl = LED_CTRL_MODE_COMBO;
7129 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7130 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7131 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards hard-wire LED mode PHY_2. */
7136 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7138 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7139 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7141 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7142 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7143 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7144 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7145 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7147 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7148 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7150 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7152 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7153 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7155 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &cfg2);
7156 if (cfg2 & (1 << 17))
7157 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7160 /* Reading the PHY ID register can conflict with ASF
7161 * firwmare access to the PHY hardware.
7164 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7165 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7167 /* Now read the physical PHY_ID from the chip and verify
7168 * that it is sane. If it doesn't look good, we fall back
7169 * to either the hard-coded table based PHY_ID and failing
7170 * that the value found in the eeprom area.
7172 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7173 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7175 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
7176 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7177 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
7179 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7182 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7183 tp->phy_id = hw_phy_id;
7184 if (hw_phy_id_masked == PHY_ID_BCM8002)
7185 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7187 if (eeprom_signature_found) {
7188 tp->phy_id = eeprom_phy_id;
7189 if (eeprom_phy_serdes)
7190 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7192 struct subsys_tbl_ent *p;
7194 /* No eeprom signature? Try the hardcoded
7195 * subsys device table.
7197 p = lookup_by_subsys(tp);
7201 tp->phy_id = p->phy_id;
7203 tp->phy_id == PHY_ID_BCM8002)
7204 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY, no ASF: make sure we advertise everything we can. */
7208 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7209 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7210 u32 bmsr, adv_reg, tg3_ctrl;
7212 tg3_readphy(tp, MII_BMSR, &bmsr);
7213 tg3_readphy(tp, MII_BMSR, &bmsr);
7215 if (bmsr & BMSR_LSTATUS)
7216 goto skip_phy_reset;
7218 err = tg3_phy_reset(tp);
7222 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7223 ADVERTISE_100HALF | ADVERTISE_100FULL |
7224 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7226 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7227 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7228 MII_TG3_CTRL_ADV_1000_FULL);
/* 5701 A0/B0 silicon must negotiate as master. */
7229 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7230 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7231 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7232 MII_TG3_CTRL_ENABLE_AS_MASTER);
7235 if (!tg3_copper_is_advertising_all(tp)) {
7236 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7238 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7239 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7241 tg3_writephy(tp, MII_BMCR,
7242 BMCR_ANENABLE | BMCR_ANRESTART);
7244 tg3_phy_set_wirespeed(tp);
7246 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7247 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7248 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 PHYs need their DSP initialized after reset. */
7252 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7253 err = tg3_init_5401phy_dsp(tp);
7258 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7259 err = tg3_init_5401phy_dsp(tp);
7262 if (!eeprom_signature_found)
7263 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7265 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7266 tp->link_config.advertising =
7267 (ADVERTISED_1000baseT_Half |
7268 ADVERTISED_1000baseT_Full |
7269 ADVERTISED_Autoneg |
7271 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7272 tp->link_config.advertising &=
7273 ~(ADVERTISED_1000baseT_Half |
7274 ADVERTISED_1000baseT_Full);
/* Read the board part number out of the VPD area of NVRAM: fetch 256
 * bytes starting at NVRAM offset 0x100, then walk the VPD structures
 * looking for a "PN" keyword and copy its value (max 24 chars) into
 * tp->board_part_number.  Sun 570X parts have no such NVRAM data and
 * get a fixed string; if no PN record is found, "none" is used.
 */
7279 static void __devinit tg3_read_partno(struct tg3 *tp)
7281 unsigned char vpd_data[256];
7284 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7285 /* Sun decided not to put the necessary bits in the
7286 * NVRAM of their onboard tg3 parts :(
7288 strcpy(tp->board_part_number, "Sun 570X");
7292 for (i = 0; i < 256; i += 4) {
7295 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
/* NVRAM words arrive little-endian-packed; split into bytes. */
7298 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
7299 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
7300 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7301 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7304 /* Now parse and find the part number. */
7305 for (i = 0; i < 256; ) {
7306 unsigned char val = vpd_data[i];
/* 0x82 = identifier string tag, 0x91 = read-only VPD tag. */
7309 if (val == 0x82 || val == 0x91) {
7312 (vpd_data[i + 2] << 8)));
7319 block_end = (i + 3 +
7321 (vpd_data[i + 2] << 8)));
7323 while (i < block_end) {
7324 if (vpd_data[i + 0] == 'P' &&
7325 vpd_data[i + 1] == 'N') {
7326 int partno_len = vpd_data[i + 2];
7328 if (partno_len > 24)
7331 memcpy(tp->board_part_number,
7340 /* Part number not found. */
7345 strcpy(tp->board_part_number, "none");
7348 #ifdef CONFIG_SPARC64
/* sparc64 only: detect Sun's onboard 570X variant by reading the
 * "subsystem-vendor-id" property from the OpenFirmware device node
 * and comparing it against Sun's PCI vendor id.
 */
7349 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7351 struct pci_dev *pdev = tp->pdev;
7352 struct pcidev_cookie *pcp = pdev->sysdata;
7355 int node = pcp->prom_node;
7359 err = prom_getproperty(node, "subsystem-vendor-id",
7360 (char *) &venid, sizeof(venid));
7361 if (err == 0 || err == -1)
7363 if (venid == PCI_VENDOR_ID_SUN)
/* Probe-time discovery of all chip "invariants": bus type, chip
 * revision, and the long list of per-revision workaround flags in
 * tp->tg3_flags / tg3_flags2.  Also forces the chip into D0, probes
 * the PHY (tg3_phy_probe) and reads the board part number.  Mostly a
 * straight-line sequence of PCI config reads and errata checks; see
 * the inline comments for the individual workarounds.
 */
7370 static int __devinit tg3_get_invariants(struct tg3 *tp)
7373 u32 cacheline_sz_reg;
7374 u32 pci_state_reg, grc_misc_cfg;
7379 #ifdef CONFIG_SPARC64
7380 if (tg3_is_sun_570X(tp))
7381 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7384 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7385 * reordering to the mailbox registers done by the host
7386 * controller can cause major troubles. We read back from
7387 * every mailbox register write to force the writes to be
7388 * posted to the chip in order.
7390 if (pci_find_device(PCI_VENDOR_ID_INTEL,
7391 PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7392 pci_find_device(PCI_VENDOR_ID_INTEL,
7393 PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7394 pci_find_device(PCI_VENDOR_ID_INTEL,
7395 PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7396 pci_find_device(PCI_VENDOR_ID_INTEL,
7397 PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7398 pci_find_device(PCI_VENDOR_ID_AMD,
7399 PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7400 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7402 /* Force memory write invalidate off. If we leave it on,
7403 * then on 5700_BX chips we have to enable a workaround.
7404 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7405 * to match the cacheline size. The Broadcom driver have this
7406 * workaround but turns MWI off all the times so never uses
7407 * it. This seems to suggest that the workaround is insufficient.
7409 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7410 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7411 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7413 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7414 * has the register indirect write enable bit set before
7415 * we try to access any of the MMIO registers. It is also
7416 * critical that the PCI-X hw workaround situation is decided
7417 * before that as well.
7419 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7422 tp->pci_chip_rev_id = (misc_ctrl_reg >>
7423 MISC_HOST_CTRL_CHIPREV_SHIFT);
7425 /* Initialize misc host control in PCI block. */
7426 tp->misc_host_ctrl |= (misc_ctrl_reg &
7427 MISC_HOST_CTRL_CHIPREV);
7428 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7429 tp->misc_host_ctrl);
7431 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
/* Unpack the four byte-wide fields of the cacheline-size register. */
7434 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
7435 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
7436 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
7437 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
7439 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7440 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
/* 5703 wants a minimum latency timer of 64. */
7442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7443 tp->pci_lat_timer < 64) {
7444 tp->pci_lat_timer = 64;
7446 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
7447 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
7448 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
7449 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
7451 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7455 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7458 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7459 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7461 /* If this is a 5700 BX chipset, and we are in PCI-X
7462 * mode, enable register write workaround.
7464 * The workaround is to use indirect register accesses
7465 * for all chip writes not to mailbox registers.
7467 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7471 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7473 /* The chip can have it's power management PCI config
7474 * space registers clobbered due to this bug.
7475 * So explicitly force the chip into D0 here.
7477 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7479 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7480 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7481 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7484 /* Also, force SERR#/PERR# in PCI command. */
7485 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7486 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7487 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7491 /* Back to back register writes can cause problems on this chip,
7492 * the workaround is to read back all reg writes except those to
7493 * mailbox regs. See tg3_write_indirect_reg32().
7495 * PCI Express 5750_A0 rev chips need this workaround too.
7497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7498 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7499 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7500 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7502 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7503 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7504 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7505 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7507 /* Chip-specific fixup from Broadcom driver */
7508 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7509 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7510 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7511 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7514 /* Force the chip into D0. */
7515 err = tg3_set_power_state(tp, 0);
7517 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7518 pci_name(tp->pdev));
7522 /* 5700 B0 chips do not support checksumming correctly due
7525 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7526 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7528 /* Pseudo-header checksum is done by hardware logic and not
7529 * the offload processers, so make the chip do the pseudo-
7530 * header checksums on receive. For transmit it is more
7531 * convenient to do the pseudo-header checksum in software
7532 * as Linux does that on transmit for us in all cases.
7534 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7535 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7537 /* Derive initial jumbo mode from MTU assigned in
7538 * ether_setup() via the alloc_etherdev() call
7540 if (tp->dev->mtu > ETH_DATA_LEN)
7541 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7543 /* Determine WakeOnLan speed to use. */
7544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7545 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7546 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7547 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7548 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7550 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7553 /* A few boards don't want Ethernet@WireSpeed phy feature */
7554 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7555 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7556 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7557 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7558 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7560 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7561 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7562 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7563 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7564 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7568 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7570 /* Only 5701 and later support tagged irq status mode.
7571 * Also, 5788 chips cannot use tagged irq status.
7573 * However, since we are using NAPI avoid tagged irq status
7574 * because the interrupt condition is more difficult to
7575 * fully clear in that mode.
7577 tp->coalesce_mode = 0;
7579 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7580 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7581 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7583 /* Initialize MAC MI mode, polling disabled. */
7584 tw32_f(MAC_MI_MODE, tp->mi_mode);
7587 /* Initialize data/descriptor byte/word swapping. */
7588 val = tr32(GRC_MODE);
7589 val &= GRC_MODE_HOST_STACKUP;
7590 tw32(GRC_MODE, val | tp->grc_mode);
7592 tg3_switch_clocks(tp);
7594 /* Clear this out for sanity. */
7595 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7597 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7599 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7600 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7601 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7603 if (chiprevid == CHIPREV_ID_5701_A0 ||
7604 chiprevid == CHIPREV_ID_5701_B0 ||
7605 chiprevid == CHIPREV_ID_5701_B2 ||
7606 chiprevid == CHIPREV_ID_5701_B5) {
7607 void __iomem *sram_base;
7609 /* Write some dummy words into the SRAM status block
7610 * area, see if it reads back correctly. If the return
7611 * value is bad, force enable the PCIX workaround.
7613 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7615 writel(0x00000000, sram_base);
7616 writel(0x00000000, sram_base + 4);
7617 writel(0xffffffff, sram_base + 4);
7618 if (readl(sram_base) != 0x00000000)
7619 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7626 grc_misc_cfg = tr32(GRC_MISC_CFG);
7627 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7629 /* Broadcom's driver says that CIOBE multisplit has a bug */
7631 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7632 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7633 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7634 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7638 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7639 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7640 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7642 /* these are limited to 10/100 only */
7643 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7644 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7645 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7646 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7647 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7648 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7649 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7650 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7651 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
7652 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
7653 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7655 err = tg3_phy_probe(tp);
7657 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7658 pci_name(tp->pdev), err);
7659 /* ... but do not return immediately ... */
7662 tg3_read_partno(tp);
7664 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7665 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7668 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7670 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7673 /* 5700 {AX,BX} chips have a broken status block link
7674 * change bit implementation, so we must use the
7675 * status register in those cases.
7677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7678 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7680 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7682 /* The led_ctrl is set during tg3_phy_probe, here we might
7683 * have to force the link status polling mechanism based
7684 * upon subsystem IDs.
7686 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7687 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7688 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7689 TG3_FLAG_USE_LINKCHG_REG);
7692 /* For all SERDES we poll the MAC status register. */
7693 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7694 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7696 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7698 /* 5700 BX chips need to have their TX producer index mailboxes
7699 * written twice to workaround a bug.
7701 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7702 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7704 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7706 /* It seems all chips can get confused if TX buffers
7707 * straddle the 4GB address boundary in some cases.
7709 tp->dev->hard_start_xmit = tg3_start_xmit;
7712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7713 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7716 /* By default, disable wake-on-lan. User can change this
7717 * using ETHTOOL_SWOL.
7719 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7724 #ifdef CONFIG_SPARC64
/* Fetch the MAC address from OpenPROM on sparc64: read the 6-byte
 * "local-mac-address" property of this PCI device's PROM node into
 * dev->dev_addr.
 * NOTE(review): this chunk is elided -- braces, the trailing
 * prom_getproperty() arguments, and the failure-path return are missing
 * from this view; comments cover only the visible statements.
 */
7725 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7727 struct net_device *dev = tp->dev;
7728 struct pci_dev *pdev = tp->pdev;
/* pcidev_cookie is the sparc64 PCI layer's per-device private data; it
 * carries the PROM node handle for this device. */
7729 struct pcidev_cookie *pcp = pdev->sysdata;
7732 int node = pcp->prom_node;
/* Only trust the property if it is exactly 6 bytes -- i.e. a plausible
 * Ethernet hardware address. */
7734 if (prom_getproplen(node, "local-mac-address") == 6) {
7735 prom_getproperty(node, "local-mac-address",
/* sparc64 fallback: when no per-device MAC is available, use the
 * machine-wide Ethernet address burned into the system IDPROM.
 * NOTE(review): chunk is elided -- braces and the return statement are
 * missing from this view.
 */
7743 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7745 struct net_device *dev = tp->dev;
/* idprom->id_ethaddr is the sparc system IDPROM's 6-byte station address. */
7747 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying sources in decreasing order of
 * reliability: sparc64 PROM, the NIC SRAM MAC-address mailbox, NVRAM, and
 * finally the live MAC address registers; validated at the end with
 * is_valid_ether_addr().
 * NOTE(review): chunk is elided -- mac_offset initialization, several
 * braces/else lines and returns are missing from this view.
 */
7752 static int __devinit tg3_get_device_address(struct tg3 *tp)
7754 struct net_device *dev = tp->dev;
7755 u32 hi, lo, mac_offset;
7757 #ifdef CONFIG_SPARC64
/* On sparc64, prefer the PROM-provided address; 0 return means success. */
7758 if (!tg3_get_macaddr_sparc(tp))
/* 5704 is a dual-port part: the second function's address lives at a
 * different NVRAM offset, selected via DUAL_MAC_CTRL_ID.
 * NOTE(review): TG3_FLG2_SUN_570X is by naming convention a tg3_flags2
 * bit (cf. the TG3_FLG2_PHY_SERDES tests elsewhere in this file), yet it
 * is tested against tp->tg3_flags here and again below -- looks like a
 * flags-word mismatch; confirm against the flag definitions. */
7763 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7764 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
7765 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7767 if (tg3_nvram_lock(tp))
7768 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7770 tg3_nvram_unlock(tp);
7773 /* First try to get it from MAC address mailbox. */
7774 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is ASCII "HK" -- apparently the firmware's mailbox-valid
 * signature in the upper 16 bits; the low bytes carry addr[0..1]. */
7775 if ((hi >> 16) == 0x484b) {
7776 dev->dev_addr[0] = (hi >> 8) & 0xff;
7777 dev->dev_addr[1] = (hi >> 0) & 0xff;
7779 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7780 dev->dev_addr[2] = (lo >> 24) & 0xff;
7781 dev->dev_addr[3] = (lo >> 16) & 0xff;
7782 dev->dev_addr[4] = (lo >> 8) & 0xff;
7783 dev->dev_addr[5] = (lo >> 0) & 0xff;
7785 /* Next, try NVRAM. */
7786 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
7787 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7788 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
/* NVRAM byte order differs from the mailbox layout above. */
7789 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7790 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7791 dev->dev_addr[2] = ((lo >> 0) & 0xff);
7792 dev->dev_addr[3] = ((lo >> 8) & 0xff);
7793 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7794 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7796 /* Finally just fetch it out of the MAC control regs. */
7798 hi = tr32(MAC_ADDR_0_HIGH);
7799 lo = tr32(MAC_ADDR_0_LOW);
7801 dev->dev_addr[5] = lo & 0xff;
7802 dev->dev_addr[4] = (lo >> 8) & 0xff;
7803 dev->dev_addr[3] = (lo >> 16) & 0xff;
7804 dev->dev_addr[2] = (lo >> 24) & 0xff;
7805 dev->dev_addr[1] = hi & 0xff;
7806 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Reject all-zero / multicast addresses; sparc64 gets one more fallback
 * (the system IDPROM) before giving up. */
7809 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7810 #ifdef CONFIG_SPARC64
7811 if (!tg3_get_default_macaddr_sparc(tp))
/* Run one DMA test transfer of `size` bytes between the host buffer
 * (bus address buf_dma) and NIC SRAM at offset 0x2100, using a buffer
 * descriptor written into the NIC's internal DMA descriptor pool.
 * `to_device` selects direction: nonzero = host-to-NIC (read DMA engine),
 * zero = NIC-to-host (write DMA engine).  Completion is detected by
 * polling the corresponding FTQ completion FIFO.
 * NOTE(review): chunk is elided -- local declarations, the if/else around
 * the direction-specific setup, the udelay in the poll loop, and the
 * return paths are missing from this view.
 */
7819 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7821 struct tg3_internal_buffer_desc test_desc;
7825 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Clear completion FIFOs and DMA engine status so we poll fresh state. */
7827 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7828 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7829 tw32(RDMAC_STATUS, 0);
7830 tw32(WDMAC_STATUS, 0);
7832 tw32(BUFMGR_MODE, 0);
/* Build the internal buffer descriptor: 64-bit host address split into
 * hi/lo words, NIC-side buffer at SRAM 0x2100. */
7835 test_desc.addr_hi = ((u64) buf_dma) >> 32;
7836 test_desc.addr_lo = buf_dma & 0xffffffff;
7837 test_desc.nic_mbuf = 0x00002100;
7838 test_desc.len = size;
7841 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
7842 * the *second* time the tg3 driver was getting loaded after an
7845 * Broadcom tells me:
7846 * ...the DMA engine is connected to the GRC block and a DMA
7847 * reset may affect the GRC block in some unpredictable way...
7848 * The behavior of resets to individual blocks has not been tested.
7850 * Broadcom noted the GRC reset will also reset all sub-components.
/* Direction-specific completion-queue/send-queue ids and DMA engine
 * enable (read DMA for to_device, write DMA otherwise). */
7853 test_desc.cqid_sqid = (13 << 8) | 2;
7855 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7858 test_desc.cqid_sqid = (16 << 8) | 7;
7860 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7863 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the PCI config
 * space memory window (base address + data register pair). */
7865 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7868 val = *(((u32 *)&test_desc) + i);
7869 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7870 sram_dma_descs + (i * sizeof(u32)));
7871 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7873 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the transfer by enqueueing the descriptor on the appropriate
 * high-priority DMA FTQ. */
7876 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7878 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Bounded poll (40 iterations) for our descriptor address to appear on
 * the completion FIFO. */
7882 for (i = 0; i < 40; i++) {
7886 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7888 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7889 if ((val & 0xffff) == sram_dma_descs) {
7900 #define TEST_BUFFER_SIZE 0x400
/* Probe-time DMA self-test and DMA read/write control tuning.
 * Allocates a coherent TEST_BUFFER_SIZE buffer, derives tp->dma_rwctrl
 * (PCI read/write command codes, write-boundary, watermarks) from the bus
 * type (PCI / PCI-X / PCI Express) and cache line size, then performs
 * write+read DMA loops via tg3_do_test_dma(), verifying data integrity
 * and relaxing the write boundary on failure.
 * NOTE(review): chunk is elided -- local declarations, several switch
 * cases, loop bodies, `goto out` paths and the final return are missing
 * from this view; comments cover only the visible statements.
 */
7902 static int __devinit tg3_test_dma(struct tg3 *tp)
7908 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Default PCI read/write command codes for DMA_RWCTRL. */
7914 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7915 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
/* Cache line size in bytes; the PCI config register holds it in
 * 32-bit-word units, and 0 is treated as the 1024-byte case. */
7921 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7924 cacheline_size = 1024;
7926 cacheline_size = (int) byte * 4;
/* Choose the DMA write-boundary setting appropriate for the bus. */
7928 switch (cacheline_size) {
7933 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7934 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7936 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7938 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7940 ~(DMA_RWCTRL_PCI_WRITE_CMD);
7942 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7947 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7948 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7950 DMA_RWCTRL_WRITE_BNDRY_256;
7951 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7953 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
/* Bus-specific watermark bits ORed into dma_rwctrl. */
7958 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7959 /* DMA read watermark not used on PCIE */
7960 tp->dma_rwctrl |= 0x00180000;
7961 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7964 tp->dma_rwctrl |= 0x003f0000;
7966 tp->dma_rwctrl |= 0x003f000f;
7968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Low 5 bits of CLOCK_CTRL encode the core clock setting; values
 * 0x6/0x7 require the one-DMA-at-a-time workaround. */
7970 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7972 if (ccval == 0x6 || ccval == 0x7)
7973 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7975 /* Set bit 23 to renable PCIX hw bug fix */
7976 tp->dma_rwctrl |= 0x009f0000;
7978 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: clear the low nibble (read command code reassigned). */
7982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7983 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7984 tp->dma_rwctrl &= 0xfffffff0;
7986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7988 /* Remove this if it causes problems for some boards. */
7989 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7991 /* On 5700/5701 chips, we need to set this bit.
7992 * Otherwise the chip will issue cacheline transactions
7993 * to streamable DMA memory with not all the byte
7994 * enables turned on. This is an error on several
7995 * RISC PCI controllers, in particular sparc64.
7997 * On 5703/5704 chips, this bit has been reassigned
7998 * a different meaning. In particular, it is used
7999 * on those chips to enable a PCI-X workaround.
8001 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8004 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8007 /* Unneeded, already done by tg3_get_invariants. */
8008 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual transfer loop below. */
8012 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8013 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
/* Fill the buffer with a known pattern before each write pass. */
8019 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8022 /* Send the buffer to the chip. */
8023 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8025 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8030 /* validate data reached card RAM correctly. */
8031 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8033 tg3_read_mem(tp, 0x2100 + (i*4), &val);
/* NOTE(review): `val` here is a little-endian u32 printed with %d --
 * format/type mismatch worth confirming, though harmless on most ABIs. */
8034 if (le32_to_cpu(val) != p[i]) {
8035 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
8036 /* ret = -ENODEV here? */
8041 /* Now read it back. */
8042 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8044 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8050 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* On a read-back mismatch with boundary disabled, fall back to a
 * 16-byte DMA write boundary and retry rather than failing outright. */
8054 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8055 DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8056 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8057 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8060 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop ran to completion without mismatch => verification succeeded. */
8066 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8074 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Initialize tp->link_config to its probe-time defaults: advertise all
 * 10/100/1000 half/full modes with autonegotiation enabled, mark all
 * current/original speed and duplex values as invalid (link down), and
 * clear the carrier on the net device.
 * NOTE(review): chunk is elided -- the function braces are missing from
 * this view.
 */
8079 static void __devinit tg3_init_link_config(struct tg3 *tp)
8081 tp->link_config.advertising =
8082 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8083 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8084 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8085 ADVERTISED_Autoneg | ADVERTISED_MII);
8086 tp->link_config.speed = SPEED_INVALID;
8087 tp->link_config.duplex = DUPLEX_INVALID;
8088 tp->link_config.autoneg = AUTONEG_ENABLE;
/* No link yet at probe time. */
8089 netif_carrier_off(tp->dev);
8090 tp->link_config.active_speed = SPEED_INVALID;
8091 tp->link_config.active_duplex = DUPLEX_INVALID;
8092 tp->link_config.phy_is_low_power = 0;
/* orig_* hold the pre-power-down settings; invalid until a suspend. */
8093 tp->link_config.orig_speed = SPEED_INVALID;
8094 tp->link_config.orig_duplex = DUPLEX_INVALID;
8095 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Load the default buffer-manager watermarks into tp->bufmgr_config:
 * standard and jumbo-frame mbuf watermarks plus the DMA descriptor
 * low/high watermarks.  (5705/5750-specific values are substituted later
 * in tg3_init_one() after tg3_get_invariants() runs.)
 * NOTE(review): chunk is elided -- the function braces are missing from
 * this view.
 */
8098 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8100 tp->bufmgr_config.mbuf_read_dma_low_water =
8101 DEFAULT_MB_RDMA_LOW_WATER;
8102 tp->bufmgr_config.mbuf_mac_rx_low_water =
8103 DEFAULT_MB_MACRX_LOW_WATER;
8104 tp->bufmgr_config.mbuf_high_water =
8105 DEFAULT_MB_HIGH_WATER;
/* Separate set of watermarks used when jumbo frames are enabled. */
8107 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8108 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8109 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8110 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8111 tp->bufmgr_config.mbuf_high_water_jumbo =
8112 DEFAULT_MB_HIGH_WATER_JUMBO;
8114 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8115 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id (tp->phy_id & PHY_ID_MASK) to a human-readable
 * name for the probe banner.  A PHY id of 0 denotes a serdes interface
 * with no recognizable copper PHY.
 * NOTE(review): chunk is elided -- braces and the switch's closing line
 * are missing from this view.
 */
8118 static char * __devinit tg3_phy_string(struct tg3 *tp)
8120 switch (tp->phy_id & PHY_ID_MASK) {
8121 case PHY_ID_BCM5400: return "5400";
8122 case PHY_ID_BCM5401: return "5401";
8123 case PHY_ID_BCM5411: return "5411";
8124 case PHY_ID_BCM5701: return "5701";
8125 case PHY_ID_BCM5703: return "5703";
8126 case PHY_ID_BCM5704: return "5704";
8127 case PHY_ID_BCM5705: return "5705";
8128 case PHY_ID_BCM5750: return "5750";
8129 case PHY_ID_BCM8002: return "8002/serdes";
8130 case 0: return "serdes";
8131 default: return "unknown";
/* Locate the sibling PCI function of a dual-port 5704: scan all eight
 * functions of this device's slot (devfn with the function bits masked
 * off) and return the first pci_dev that is not ourselves.
 * NOTE(review): chunk is elided -- the function header line (8135/8136),
 * loop braces, the pci_dev_put/return handling and closing brace are
 * missing from this view.
 */
8135 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8137 struct pci_dev *peer;
/* devnr = our devfn with the low 3 function bits cleared (slot base). */
8138 unsigned int func, devnr = tp->pdev->devfn & ~7;
8140 for (func = 0; func < 8; func++) {
/* pci_get_slot() takes a reference on the returned device. */
8141 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8142 if (peer && peer != tp->pdev)
8146 if (!peer || peer == tp->pdev)
8150 * We don't need to keep the refcount elevated; there's no way
8151 * to remove one half of this device without removing the other
/* PCI probe entry point: enable and map the device, allocate and wire up
 * the net_device, read chip invariants, fetch the MAC address, run the
 * DMA self-test, and register the interface.  Returns 0 on success or a
 * negative errno, unwinding via the err_out_* labels.
 * NOTE(review): chunk is elided -- many intermediate lines (error-path
 * labels, `goto` targets, blank separators, some conditionals) are
 * missing from this view; comments cover only the visible statements.
 */
8158 static int __devinit tg3_init_one(struct pci_dev *pdev,
8159 const struct pci_device_id *ent)
/* Print the version banner only on the first probed device. */
8161 static int tg3_version_printed = 0;
8162 unsigned long tg3reg_base, tg3reg_len;
8163 struct net_device *dev;
8165 int i, err, pci_using_dac, pm_cap;
8167 if (tg3_version_printed++ == 0)
8168 printk(KERN_INFO "%s", version);
8170 err = pci_enable_device(pdev);
8172 printk(KERN_ERR PFX "Cannot enable PCI device, "
/* BAR 0 must be a memory resource (the register aperture). */
8177 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8178 printk(KERN_ERR PFX "Cannot find proper PCI device "
8179 "base address, aborting.\n");
8181 goto err_out_disable_pdev;
8184 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8186 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8188 goto err_out_disable_pdev;
8191 pci_set_master(pdev);
8193 /* Find power-management capability. */
8194 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8196 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8199 goto err_out_free_res;
8202 /* Configure DMA attributes. */
/* Try 64-bit DMA first; fall back to 32-bit masks below on failure. */
8203 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8206 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8208 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8209 "for consistent allocations\n");
8210 goto err_out_free_res;
8213 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8215 printk(KERN_ERR PFX "No usable DMA configuration, "
8217 goto err_out_free_res;
8222 tg3reg_base = pci_resource_start(pdev, 0);
8223 tg3reg_len = pci_resource_len(pdev, 0);
8225 dev = alloc_etherdev(sizeof(*tp));
8227 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8229 goto err_out_free_res;
8232 SET_MODULE_OWNER(dev);
8233 SET_NETDEV_DEV(dev, &pdev->dev);
8236 dev->features |= NETIF_F_HIGHDMA;
/* Driver does its own TX locking. */
8237 dev->features |= NETIF_F_LLTX;
8238 #if TG3_VLAN_TAG_USED
8239 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8240 dev->vlan_rx_register = tg3_vlan_rx_register;
8241 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8244 tp = netdev_priv(dev);
8247 tp->pm_cap = pm_cap;
8248 tp->mac_mode = TG3_DEF_MAC_MODE;
8249 tp->rx_mode = TG3_DEF_RX_MODE;
8250 tp->tx_mode = TG3_DEF_TX_MODE;
8251 tp->mi_mode = MAC_MI_MODE_BASE;
8253 tp->msg_enable = tg3_debug;
8255 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8257 /* The word/byte swap controls here control register access byte
8258 * swapping. DMA data byte swapping is controlled in the GRC_MODE
8261 tp->misc_host_ctrl =
8262 MISC_HOST_CTRL_MASK_PCI_INT |
8263 MISC_HOST_CTRL_WORD_SWAP |
8264 MISC_HOST_CTRL_INDIR_ACCESS |
8265 MISC_HOST_CTRL_PCISTATE_RW;
8267 /* The NONFRM (non-frame) byte/word swap controls take effect
8268 * on descriptor entries, anything which isn't packet data.
8270 * The StrongARM chips on the board (one for tx, one for rx)
8271 * are running in big-endian mode.
8273 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8274 GRC_MODE_WSWAP_NONFRM_DATA);
8276 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8278 spin_lock_init(&tp->lock);
8279 spin_lock_init(&tp->tx_lock);
8280 spin_lock_init(&tp->indirect_lock);
8281 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8283 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8284 if (tp->regs == 0UL) {
8285 printk(KERN_ERR PFX "Cannot map device registers, "
8288 goto err_out_free_dev;
8291 tg3_init_link_config(tp);
8293 tg3_init_bufmgr_config(tp);
8295 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8296 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8297 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up the net_device operations (pre-net_device_ops era). */
8299 dev->open = tg3_open;
8300 dev->stop = tg3_close;
8301 dev->get_stats = tg3_get_stats;
8302 dev->set_multicast_list = tg3_set_rx_mode;
8303 dev->set_mac_address = tg3_set_mac_addr;
8304 dev->do_ioctl = tg3_ioctl;
8305 dev->tx_timeout = tg3_tx_timeout;
8306 dev->poll = tg3_poll;
8307 dev->ethtool_ops = &tg3_ethtool_ops;
8309 dev->watchdog_timeo = TG3_TX_TIMEOUT;
8310 dev->change_mtu = tg3_change_mtu;
8311 dev->irq = pdev->irq;
8312 #ifdef CONFIG_NET_POLL_CONTROLLER
8313 dev->poll_controller = tg3_poll_controller;
8316 err = tg3_get_invariants(tp);
8318 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8320 goto err_out_iounmap;
/* 5705/5750 use smaller buffer-manager watermarks than the defaults
 * installed by tg3_init_bufmgr_config() above. */
8323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8324 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8325 tp->bufmgr_config.mbuf_read_dma_low_water =
8326 DEFAULT_MB_RDMA_LOW_WATER_5705;
8327 tp->bufmgr_config.mbuf_mac_rx_low_water =
8328 DEFAULT_MB_MACRX_LOW_WATER_5705;
8329 tp->bufmgr_config.mbuf_high_water =
8330 DEFAULT_MB_HIGH_WATER_5705;
8333 #if TG3_TSO_SUPPORT != 0
/* Chips/configurations with known-broken TSO firmware interaction. */
8334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8336 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8337 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8338 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8339 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8341 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8344 /* TSO is off by default, user can enable using ethtool. */
8346 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8347 dev->features |= NETIF_F_TSO;
/* 5705_A1 on a slow bus without TSO: cap the RX ring at 63 entries. */
8352 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8353 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8354 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8355 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8356 tp->rx_pending = 63;
8359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8360 tp->pdev_peer = tg3_find_5704_peer(tp);
8362 err = tg3_get_device_address(tp);
8364 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8366 goto err_out_iounmap;
8370 * Reset chip in case UNDI or EFI driver did not shutdown
8371 * DMA self test will enable WDMAC and we'll see (spurious)
8372 * pending DMA on the PCI bus at that point.
8374 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8375 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8376 pci_save_state(tp->pdev);
8377 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8381 err = tg3_test_dma(tp);
8383 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8384 goto err_out_iounmap;
8387 /* Tigon3 can do ipv4 only... and some chips have buggy
8390 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8391 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8392 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8394 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
/* 5788 cannot do 64-bit DMA for packet buffers. */
8396 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8397 dev->features &= ~NETIF_F_HIGHDMA;
8399 /* flow control autonegotiation is default behavior */
8400 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8402 err = register_netdev(dev);
8404 printk(KERN_ERR PFX "Cannot register net device, "
8406 goto err_out_iounmap;
8409 pci_set_drvdata(pdev, dev);
8411 /* Now that we have fully setup the chip, save away a snapshot
8412 * of the PCI config space. We need to restore this after
8413 * GRC_MISC_CFG core clock resets and some resume events.
8415 pci_save_state(tp->pdev);
8417 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8419 tp->board_part_number,
8420 tp->pci_chip_rev_id,
8422 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8423 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8424 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8425 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8426 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8427 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8429 for (i = 0; i < 6; i++)
8430 printk("%2.2x%c", dev->dev_addr[i],
8431 i == 5 ? '\n' : ':');
8433 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8434 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8437 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8438 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8439 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8440 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8441 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8442 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8443 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
/* Error unwind: release regions, then disable the PCI device. */
8454 pci_release_regions(pdev);
8456 err_out_disable_pdev:
8457 pci_disable_device(pdev);
8458 pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: unregister the net device and release all PCI
 * resources claimed in tg3_init_one().
 * NOTE(review): chunk is elided -- braces, the iounmap/free_netdev calls
 * and the NULL-dev guard body are missing from this view.
 */
8462 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8464 struct net_device *dev = pci_get_drvdata(pdev);
8467 struct tg3 *tp = netdev_priv(dev);
8469 unregister_netdev(dev);
8472 pci_release_regions(pdev);
8473 pci_disable_device(pdev);
8474 pci_set_drvdata(pdev, NULL);
/* PM suspend handler: stop the timer, disable interrupts under lock,
 * detach the device, halt the chip, and enter the requested power state.
 * If tg3_set_power_state() fails, the device is restarted and reattached
 * so it keeps working.
 * NOTE(review): chunk is elided -- the early return for a non-running
 * device, tg3_netif_stop/tg3_halt calls and final return are missing
 * from this view.
 */
8478 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8480 struct net_device *dev = pci_get_drvdata(pdev);
8481 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface is down. */
8484 if (!netif_running(dev))
8489 del_timer_sync(&tp->timer);
/* Lock order used throughout this file: tp->lock (irq) then tx_lock. */
8491 spin_lock_irq(&tp->lock);
8492 spin_lock(&tp->tx_lock);
8493 tg3_disable_ints(tp);
8494 spin_unlock(&tp->tx_lock);
8495 spin_unlock_irq(&tp->lock);
8497 netif_device_detach(dev);
8499 spin_lock_irq(&tp->lock);
8500 spin_lock(&tp->tx_lock);
8502 spin_unlock(&tp->tx_lock);
8503 spin_unlock_irq(&tp->lock);
8505 err = tg3_set_power_state(tp, state);
/* Failure path: bring the chip back up and reattach the device. */
8507 spin_lock_irq(&tp->lock);
8508 spin_lock(&tp->tx_lock);
8512 tp->timer.expires = jiffies + tp->timer_offset;
8513 add_timer(&tp->timer);
8515 netif_device_attach(dev);
8516 tg3_netif_start(tp);
8518 spin_unlock(&tp->tx_lock);
8519 spin_unlock_irq(&tp->lock);
/* PM resume handler: restore PCI config space, return to full power
 * (state 0), reattach the device, reinitialize the chip under lock,
 * restart the driver timer, and re-enable interrupts.
 * NOTE(review): chunk is elided -- error-return checks after
 * tg3_set_power_state(), the tg3_init_hw call and final return are
 * missing from this view.
 */
8525 static int tg3_resume(struct pci_dev *pdev)
8527 struct net_device *dev = pci_get_drvdata(pdev);
8528 struct tg3 *tp = netdev_priv(dev);
8531 if (!netif_running(dev))
/* Undo the config-space loss from the low-power state. */
8534 pci_restore_state(tp->pdev);
8536 err = tg3_set_power_state(tp, 0);
8540 netif_device_attach(dev);
8542 spin_lock_irq(&tp->lock);
8543 spin_lock(&tp->tx_lock);
8547 tp->timer.expires = jiffies + tp->timer_offset;
8548 add_timer(&tp->timer);
8550 tg3_enable_ints(tp);
8552 tg3_netif_start(tp);
8554 spin_unlock(&tp->tx_lock);
8555 spin_unlock_irq(&tp->lock);
/* PCI driver registration table: ties the device-id table and the
 * probe/remove/suspend/resume callbacks defined above to the PCI core.
 * NOTE(review): the closing `};` is elided from this view.
 */
8560 static struct pci_driver tg3_driver = {
8561 .name = DRV_MODULE_NAME,
8562 .id_table = tg3_pci_tbl,
8563 .probe = tg3_init_one,
8564 .remove = __devexit_p(tg3_remove_one),
8565 .suspend = tg3_suspend,
8566 .resume = tg3_resume
/* Module init: register the PCI driver (pci_module_init is the pre-2.6.x
 * era registration wrapper).  Braces elided from this view. */
8569 static int __init tg3_init(void)
8571 return pci_module_init(&tg3_driver);
/* Module exit: unregister the PCI driver.  Braces elided from this view. */
8574 static void __exit tg3_cleanup(void)
8576 pci_unregister_driver(&tg3_driver);
/* Bind the module entry/exit points to the functions above. */
8579 module_init(tg3_init);
8580 module_exit(tg3_cleanup);