2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
9 * Copyright (C) 2000-2003 Broadcom Corporation.
12 #include <linux/config.h>
14 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/if_vlan.h>
31 #include <linux/tcp.h>
32 #include <linux/workqueue.h>
34 #include <net/checksum.h>
36 #include <asm/system.h>
38 #include <asm/byteorder.h>
39 #include <asm/uaccess.h>
42 #include <asm/idprom.h>
43 #include <asm/oplib.h>
47 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48 #define TG3_VLAN_TAG_USED 1
50 #define TG3_VLAN_TAG_USED 0
54 #define TG3_TSO_SUPPORT 1
56 #define TG3_TSO_SUPPORT 0
61 #define DRV_MODULE_NAME "tg3"
62 #define PFX DRV_MODULE_NAME ": "
63 #define DRV_MODULE_VERSION "3.10"
64 #define DRV_MODULE_RELDATE "September 14, 2004"
66 #define TG3_DEF_MAC_MODE 0
67 #define TG3_DEF_RX_MODE 0
68 #define TG3_DEF_TX_MODE 0
69 #define TG3_DEF_MSG_ENABLE \
79 /* length of time before we decide the hardware is borked,
80 * and dev->tx_timeout() should be called to fix the problem
82 #define TG3_TX_TIMEOUT (5 * HZ)
84 /* hardware minimum and maximum for a single frame's data payload */
85 #define TG3_MIN_MTU 60
86 #define TG3_MAX_MTU(tp) \
87 ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
88 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91 * You can't change the ring sizes, but you can change where you place
92 * them in the NIC onboard memory.
94 #define TG3_RX_RING_SIZE 512
95 #define TG3_DEF_RX_RING_PENDING 200
96 #define TG3_RX_JUMBO_RING_SIZE 256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
99 /* Do not place this n-ring entries value into the tp struct itself,
100 * we really want to expose these constants to GCC so that modulo et
101 * al. operations are done with shifts and masks instead of with
102 * hw multiply/modulo instructions. Another solution would be to
103 * replace things like '% foo' with '& (foo - 1)'.
105 #define TG3_RX_RCB_RING_SIZE(tp) \
106 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
110 #define TG3_TX_RING_SIZE 512
111 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116 TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118 TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 #define TX_RING_GAP(TP) \
122 (TG3_TX_RING_SIZE - (TP)->tx_pending)
123 #define TX_BUFFS_AVAIL(TP) \
124 (((TP)->tx_cons <= (TP)->tx_prod) ? \
125 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
126 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
127 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
129 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
/* Driver banner string plus module metadata.  tg3_debug is a bitmask of
 * NETIF_MSG_* categories; -1 defers to the compiled-in default.
 * NOTE(review): this excerpt elides interior lines (original numbering
 * has gaps), so surrounding context is not fully visible here.
 */
138 static char version[] __devinitdata =
139 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_PARM(tg3_debug, "i");
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146 MODULE_VERSION(DRV_MODULE_VERSION);
148 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
/* PCI device-ID match table: every Tigon3 variant this driver binds to,
 * each entry matching any subsystem vendor/device.
 * NOTE(review): the terminating { 0, } sentinel and closing brace are on
 * lines elided from this excerpt.
 */
150 static struct pci_device_id tg3_pci_tbl[] = {
151 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
/* Rebadged Tigon3 parts from other vendors follow. */
209 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
/* Export the table for hotplug / module autoloading. */
226 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* ETHTOOL_GSTRINGS names for the TG3_NUM_STATS u64 counters; the order
 * must match struct tg3_ethtool_stats.
 * NOTE(review): the opening "static struct {" line is elided from this
 * excerpt, as are several entries (original numbering has gaps).
 */
229 char string[ETH_GSTRING_LEN];
230 } ethtool_stats_keys[TG3_NUM_STATS] = {
233 { "rx_ucast_packets" },
234 { "rx_mcast_packets" },
235 { "rx_bcast_packets" },
237 { "rx_align_errors" },
238 { "rx_xon_pause_rcvd" },
239 { "rx_xoff_pause_rcvd" },
240 { "rx_mac_ctrl_rcvd" },
241 { "rx_xoff_entered" },
242 { "rx_frame_too_long_errors" },
244 { "rx_undersize_packets" },
245 { "rx_in_length_errors" },
246 { "rx_out_length_errors" },
247 { "rx_64_or_less_octet_packets" },
248 { "rx_65_to_127_octet_packets" },
249 { "rx_128_to_255_octet_packets" },
250 { "rx_256_to_511_octet_packets" },
251 { "rx_512_to_1023_octet_packets" },
252 { "rx_1024_to_1522_octet_packets" },
253 { "rx_1523_to_2047_octet_packets" },
254 { "rx_2048_to_4095_octet_packets" },
255 { "rx_4096_to_8191_octet_packets" },
256 { "rx_8192_to_9022_octet_packets" },
263 { "tx_flow_control" },
265 { "tx_single_collisions" },
266 { "tx_mult_collisions" },
268 { "tx_excessive_collisions" },
269 { "tx_late_collisions" },
270 { "tx_collide_2times" },
271 { "tx_collide_3times" },
272 { "tx_collide_4times" },
273 { "tx_collide_5times" },
274 { "tx_collide_6times" },
275 { "tx_collide_7times" },
276 { "tx_collide_8times" },
277 { "tx_collide_9times" },
278 { "tx_collide_10times" },
279 { "tx_collide_11times" },
280 { "tx_collide_12times" },
281 { "tx_collide_13times" },
282 { "tx_collide_14times" },
283 { "tx_collide_15times" },
284 { "tx_ucast_packets" },
285 { "tx_mcast_packets" },
286 { "tx_bcast_packets" },
287 { "tx_carrier_sense_errors" },
291 { "dma_writeq_full" },
292 { "dma_write_prioq_full" },
296 { "rx_threshold_hit" },
298 { "dma_readq_full" },
299 { "dma_read_prioq_full" },
300 { "tx_comp_queue_full" },
302 { "ring_set_send_prod_index" },
303 { "ring_status_update" },
305 { "nic_avoided_irqs" },
306 { "nic_tx_threshold_hit" }
/* Write a 32-bit chip register.  On chips with the PCIX target hwbug the
 * write is routed through the PCI config-space register window, serialized
 * by indirect_lock; otherwise it is a direct MMIO write, read back when
 * the 5701 register-write bug workaround is needed.
 * NOTE(review): braces/else lines are elided from this excerpt.
 */
309 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
311 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
/* Indirect path: point the window at 'off', then write the data. */
314 spin_lock_irqsave(&tp->indirect_lock, flags);
315 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
316 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
317 spin_unlock_irqrestore(&tp->indirect_lock, flags);
319 writel(val, tp->regs + off);
/* Read back to flush the posted write on buggy 5701 parts. */
320 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
321 readl(tp->regs + off);
/* Like tg3_write_indirect_reg32, but the MMIO path always performs a
 * read-back so the posted PCI write is flushed before returning.
 * NOTE(review): braces/else lines are elided from this excerpt.
 */
325 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
327 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
328 /* Indirect config-space window path, serialized by indirect_lock. */
330 spin_lock_irqsave(&tp->indirect_lock, flags);
331 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
332 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
333 spin_unlock_irqrestore(&tp->indirect_lock, flags);
335 void __iomem *dest = tp->regs + off;
337 readl(dest); /* always flush PCI write */
/* Write an RX mailbox register; when the chipset can reorder mailbox
 * writes, extra flushing is applied (the write/flush lines themselves
 * are elided from this excerpt).
 */
341 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
343 void __iomem *mbox = tp->regs + off;
345 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Write a TX mailbox register, honoring both the TXD mailbox hwbug and
 * the mailbox write-reorder workaround (the actual write/flush lines
 * are elided from this excerpt).
 */
349 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
351 void __iomem *mbox = tp->regs + off;
353 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
355 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Register accessor shorthands.  All of these expand references to a
 * local variable named "tp" in the enclosing function scope; tw32 and
 * tw32_f go through the workaround-aware helpers above, the rest are
 * plain MMIO accessors of the given width.
 */
359 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
360 #define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
361 #define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
363 #define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
364 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
365 #define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
366 #define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
367 #define tr32(reg) readl(tp->regs + (reg))
368 #define tr16(reg) readw(tp->regs + (reg))
369 #define tr8(reg) readb(tp->regs + (reg))
/* Write a word into NIC on-board memory via the PCI config-space memory
 * window, serialized by indirect_lock.  The window base is restored to 0
 * afterwards, as other code assumes.
 */
371 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
375 spin_lock_irqsave(&tp->indirect_lock, flags);
376 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
377 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
379 /* Always leave this as zero. */
380 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
381 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word from NIC on-board memory into *val via the PCI config-space
 * memory window — the read-side counterpart of tg3_write_mem().
 */
384 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
388 spin_lock_irqsave(&tp->indirect_lock, flags);
389 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
390 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
392 /* Always leave this as zero. */
393 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
394 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mask chip interrupts: set the PCI-INT mask bit in MISC_HOST_CTRL, then
 * write 1 to the interrupt mailbox and read it back to flush.
 */
397 static void tg3_disable_ints(struct tg3 *tp)
399 tw32(TG3PCI_MISC_HOST_CTRL,
400 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
401 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
402 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* If the status block already has an update pending, poke GRC_LOCAL_CTRL
 * to force an interrupt so the event is not lost.
 */
405 static inline void tg3_cond_int(struct tg3 *tp)
407 if (tp->hw_status->status & SD_STATUS_UPDATED)
408 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Unmask chip interrupts: clear the PCI-INT mask bit, write 0 to the
 * interrupt mailbox, and read back to flush.  (The tg3_cond_int() call
 * that follows in the full source is elided from this excerpt.)
 */
411 static void tg3_enable_ints(struct tg3 *tp)
413 tw32(TG3PCI_MISC_HOST_CTRL,
414 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
415 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
416 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* Quiesce the network stack: stop NAPI polling, then stop the TX queue. */
421 static inline void tg3_netif_stop(struct tg3 *tp)
423 netif_poll_disable(tp->dev);
424 netif_tx_disable(tp->dev);
/* Resume the network stack after tg3_netif_stop(): wake the TX queue and
 * re-enable NAPI polling.
 */
427 static inline void tg3_netif_start(struct tg3 *tp)
429 netif_wake_queue(tp->dev);
430 /* NOTE: unconditional netif_wake_queue is only appropriate
431 * so long as all callers are assured to have free tx slots
432 * (such as after tg3_init_hw)
434 netif_poll_enable(tp->dev);
/* Step the core clock source back to its default in stages, writing
 * TG3PCI_CLOCK_CTRL through intermediate values as the chip requires
 * (5705/5750 use the 625 MHz core path, older chips the 44 MHz/ALTCLK
 * path).  NOTE(review): several lines, including delays and closing
 * braces, are elided from this excerpt.
 */
438 static void tg3_switch_clocks(struct tg3 *tp)
440 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
443 orig_clock_ctrl = clock_ctrl;
444 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
445 CLOCK_CTRL_CLKRUN_OENABLE |
447 tp->pci_clock_ctrl = clock_ctrl;
449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
451 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
452 tw32_f(TG3PCI_CLOCK_CTRL,
453 clock_ctrl | CLOCK_CTRL_625_CORE);
456 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
457 tw32_f(TG3PCI_CLOCK_CTRL,
459 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
461 tw32_f(TG3PCI_CLOCK_CTRL,
462 clock_ctrl | (CLOCK_CTRL_ALTCLK));
465 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
/* Upper bound on MI_COM busy-poll iterations for PHY register access. */
469 #define PHY_BUSY_LOOPS 5000
/* Read PHY register 'reg' over the MDIO (MII management) interface into
 * *val.  Autopolling is temporarily disabled, a READ frame is issued via
 * MAC_MI_COM, and the BUSY bit is polled until the transaction completes.
 * NOTE(review): delay/brace/return lines are elided from this excerpt;
 * the visible result extraction is *val = frame_val & MI_COM_DATA_MASK.
 */
471 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
476 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
478 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
/* Build the MI_COM read frame: PHY address + register + READ|START. */
484 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
485 MI_COM_PHY_ADDR_MASK);
486 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
487 MI_COM_REG_ADDR_MASK);
488 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
490 tw32_f(MAC_MI_COM, frame_val);
/* Busy-poll until the interface clears MI_COM_BUSY or we give up. */
492 loops = PHY_BUSY_LOOPS;
493 while (loops-- > 0) {
495 frame_val = tr32(MAC_MI_COM);
497 if ((frame_val & MI_COM_BUSY) == 0) {
499 frame_val = tr32(MAC_MI_COM);
506 *val = frame_val & MI_COM_DATA_MASK;
/* Restore autopolling if it was enabled on entry. */
510 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
511 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write 'val' to PHY register 'reg' over MDIO — the write-side counterpart
 * of tg3_readphy(): autopoll off, WRITE frame via MAC_MI_COM, busy-poll,
 * autopoll restored.  NOTE(review): delay/brace/return lines are elided
 * from this excerpt.
 */
518 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
523 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
525 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
/* Build the MI_COM write frame: address + register + data + WRITE|START. */
529 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
530 MI_COM_PHY_ADDR_MASK);
531 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
532 MI_COM_REG_ADDR_MASK);
533 frame_val |= (val & MI_COM_DATA_MASK);
534 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
536 tw32_f(MAC_MI_COM, frame_val);
538 loops = PHY_BUSY_LOOPS;
539 while (loops-- > 0) {
541 frame_val = tr32(MAC_MI_COM);
542 if ((frame_val & MI_COM_BUSY) == 0) {
544 frame_val = tr32(MAC_MI_COM);
/* Restore autopolling if it was enabled on entry. */
553 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
554 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable the PHY's "ethernet@wirespeed" feature (downshift on marginal
 * cabling) via a read-modify-write of AUX_CTRL, unless the chip flags
 * say the feature must stay off.
 */
561 static void tg3_phy_set_wirespeed(struct tg3 *tp)
565 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
568 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
569 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
570 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
/* Issue a software reset via BMCR and poll until the self-clearing
 * BMCR_RESET bit drops.  Returns 0 on success, an error/timeout code
 * otherwise (loop bounds and returns are elided from this excerpt).
 */
573 static int tg3_bmcr_reset(struct tg3 *tp)
578 /* OK, reset it, and poll the BMCR_RESET bit until it
579 * clears or we time out.
581 phy_control = BMCR_RESET;
582 err = tg3_writephy(tp, MII_BMCR, phy_control);
588 err = tg3_readphy(tp, MII_BMCR, &phy_control);
592 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until the DSP macro-busy bit (0x1000) clears.
 * (Loop bounds and return statements are elided from this excerpt.)
 */
604 static int tg3_wait_macro_done(struct tg3 *tp)
611 tg3_readphy(tp, 0x16, &tmp32);
612 if ((tmp32 & 0x1000) == 0)
/* Write a fixed DSP test pattern to each of the four PHY channels, read
 * it back, and verify it.  On mismatch, request a PHY reset via *resetp
 * and apply the DSP fixup writes.  Returns 0 when all channels verify.
 * NOTE(review): error-return and brace lines are elided from this excerpt.
 */
621 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
623 static const u32 test_pat[4][6] = {
624 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
625 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
626 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
627 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
631 for (chan = 0; chan < 4; chan++) {
/* Select this channel's DSP block and write the 6-word pattern. */
634 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
635 (chan * 0x2000) | 0x0200);
636 tg3_writephy(tp, 0x16, 0x0002);
638 for (i = 0; i < 6; i++)
639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
642 tg3_writephy(tp, 0x16, 0x0202);
643 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and arm the read-back of the pattern. */
648 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
649 (chan * 0x2000) | 0x0200);
650 tg3_writephy(tp, 0x16, 0x0082);
651 if (tg3_wait_macro_done(tp)) {
656 tg3_writephy(tp, 0x16, 0x0802);
657 if (tg3_wait_macro_done(tp)) {
/* Compare each low/high pair against the expected pattern. */
662 for (i = 0; i < 6; i += 2) {
665 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
666 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
667 if (tg3_wait_macro_done(tp)) {
673 if (low != test_pat[chan][i] ||
674 high != test_pat[chan][i+1]) {
675 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
676 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
677 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out the DSP pattern registers on all four PHY channels, waiting
 * for each channel's macro to complete.  Returns 0 on success (error
 * returns are elided from this excerpt).
 */
687 static int tg3_phy_reset_chanpat(struct tg3 *tp)
691 for (chan = 0; chan < 4; chan++) {
694 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
695 (chan * 0x2000) | 0x0200);
696 tg3_writephy(tp, 0x16, 0x0002);
697 for (i = 0; i < 6; i++)
698 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
699 tg3_writephy(tp, 0x16, 0x0202);
700 if (tg3_wait_macro_done(tp))
/* PHY reset workaround sequence for 5703/5704/5705-class chips: force
 * 1000/full master mode, run the DSP test-pattern check (retrying with
 * BMCR resets), clear the channel patterns, then restore the original
 * MII_TG3_CTRL and EXT_CTRL settings.
 * NOTE(review): retry-loop and error-return lines are elided from this
 * excerpt.  Also, "®32" on the tg3_readphy lines is a mojibake of
 * "&reg32" (an HTML "&reg;" entity corrupted in transit) — the encoding
 * must be repaired before this compiles.
 */
707 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
709 u32 reg32, phy9_orig;
710 int retries, do_phy_reset, err;
716 err = tg3_bmcr_reset(tp);
722 /* Disable transmitter and interrupt. */
723 tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
725 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
727 /* Set full-duplex, 1000 mbps. */
728 tg3_writephy(tp, MII_BMCR,
729 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
731 /* Set to master mode. */
732 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
733 tg3_writephy(tp, MII_TG3_CTRL,
734 (MII_TG3_CTRL_AS_MASTER |
735 MII_TG3_CTRL_ENABLE_AS_MASTER));
737 /* Enable SM_DSP_CLOCK and 6dB. */
738 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
740 /* Block the PHY control access. */
741 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
742 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
744 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
749 err = tg3_phy_reset_chanpat(tp);
753 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
754 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
756 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
757 tg3_writephy(tp, 0x16, 0x0000);
759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
761 /* Set Extended packet length bit for jumbo frames */
762 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
765 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* Restore the caller's 1000BASE-T control and re-enable TX/interrupt. */
768 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
770 tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
772 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
777 /* Reset the tigon3 PHY and apply chip/PHY-specific DSP workarounds
778 * (ADC bug, 5704 A0 bug, BER bug), then set the extended-packet-length
 * bit and wirespeed.  NOTE(review): an older version of this comment
 * mentioned a FORCE argument that this signature does not take.
 */
780 static int tg3_phy_reset(struct tg3 *tp)
785 err = tg3_readphy(tp, MII_BMSR, &phy_status);
786 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
/* 5703/5704/5705 need the extended reset-with-testpattern sequence. */
790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
793 err = tg3_phy_reset_5703_4_5(tp);
799 err = tg3_bmcr_reset(tp);
/* PHY ADC bug workaround: magic DSP writes per Broadcom errata. */
804 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
805 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
806 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
807 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
808 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
809 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
810 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* 5704 A0 bug workaround: the double write is intentional. */
812 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
813 tg3_writephy(tp, 0x1c, 0x8d68);
814 tg3_writephy(tp, 0x1c, 0x8d68);
/* Bit-error-rate bug workaround: another errata DSP sequence. */
816 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
817 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
818 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
819 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
820 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
821 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
822 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
823 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
824 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
826 /* Set Extended packet length bit (bit 14) on all chips that */
827 /* support jumbo frames */
828 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
829 /* Cannot do read-modify-write on 5401 */
830 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
831 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
832 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
835 /* Set bit 14 with read-modify-write to preserve other bits */
836 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
837 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
838 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
840 tg3_phy_set_wirespeed(tp);
/* Configure the GPIO pins that gate auxiliary (Vaux) power, taking the
 * dual-port 5704 peer device into account: if either port needs WOL the
 * GPIOs are driven to keep aux power on, otherwise they are parked off.
 * NOTE(review): several branch/brace/delay lines are elided from this
 * excerpt, so the exact branch structure is not fully visible.
 */
844 static void tg3_frob_aux_power(struct tg3 *tp)
846 struct tg3 *tp_peer = tp;
848 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
/* On the dual-port 5704, fetch the sibling port's private data. */
851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
852 tp_peer = pci_get_drvdata(tp->pdev_peer);
858 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
859 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
862 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
863 (GRC_LCLCTRL_GPIO_OE0 |
864 GRC_LCLCTRL_GPIO_OE1 |
865 GRC_LCLCTRL_GPIO_OE2 |
866 GRC_LCLCTRL_GPIO_OUTPUT0 |
867 GRC_LCLCTRL_GPIO_OUTPUT1));
871 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
/* Staged GPIO sequence to switch aux power on newer chips. */
874 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
875 (GRC_LCLCTRL_GPIO_OE0 |
876 GRC_LCLCTRL_GPIO_OE1 |
877 GRC_LCLCTRL_GPIO_OE2 |
878 GRC_LCLCTRL_GPIO_OUTPUT1 |
879 GRC_LCLCTRL_GPIO_OUTPUT2));
882 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
883 (GRC_LCLCTRL_GPIO_OE0 |
884 GRC_LCLCTRL_GPIO_OE1 |
885 GRC_LCLCTRL_GPIO_OE2 |
886 GRC_LCLCTRL_GPIO_OUTPUT0 |
887 GRC_LCLCTRL_GPIO_OUTPUT1 |
888 GRC_LCLCTRL_GPIO_OUTPUT2));
891 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
892 (GRC_LCLCTRL_GPIO_OE0 |
893 GRC_LCLCTRL_GPIO_OE1 |
894 GRC_LCLCTRL_GPIO_OE2 |
895 GRC_LCLCTRL_GPIO_OUTPUT0 |
896 GRC_LCLCTRL_GPIO_OUTPUT1));
/* No WOL anywhere: park GPIO1 (skip on 5700/5701 which lack this). */
900 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
901 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
903 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
906 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907 (GRC_LCLCTRL_GPIO_OE1 |
908 GRC_LCLCTRL_GPIO_OUTPUT1));
911 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
912 (GRC_LCLCTRL_GPIO_OE1));
915 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
916 (GRC_LCLCTRL_GPIO_OE1 |
917 GRC_LCLCTRL_GPIO_OUTPUT1));
/* Forward declaration: link setup is defined later in the file. */
923 static int tg3_setup_phy(struct tg3 *, int);
/* Reset-kind codes passed to tg3_write_sig_post_reset() so firmware/ASF
 * knows why the chip was reset.
 */
925 #define RESET_KIND_SHUTDOWN 0
926 #define RESET_KIND_INIT 1
927 #define RESET_KIND_SUSPEND 2
929 static void tg3_write_sig_post_reset(struct tg3 *, int);
/* Transition the device into the requested PCI power state: save link
 * config, drop to 10/half for WOL, program MAC/clock registers for the
 * low-power state, frob aux power, then write PCI_PM_CTRL and signal
 * RESET_KIND_SHUTDOWN.  NOTE(review): many lines (D0 early-return path,
 * delays, braces) are elided from this excerpt.
 */
931 static int tg3_set_power_state(struct tg3 *tp, int state)
934 u16 power_control, power_caps;
937 /* Make sure register accesses (indirect or otherwise)
938 * will function correctly.
940 pci_write_config_dword(tp->pdev,
941 TG3PCI_MISC_HOST_CTRL,
944 pci_read_config_word(tp->pdev,
947 power_control |= PCI_PM_CTRL_PME_STATUS;
948 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
952 pci_write_config_word(tp->pdev,
955 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
973 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
975 tp->dev->name, state);
979 power_control |= PCI_PM_CTRL_PME_ENABLE;
/* Mask PCI interrupts while the chip is being powered down. */
981 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
982 tw32(TG3PCI_MISC_HOST_CTRL,
983 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* First entry into low power: remember the current link settings. */
985 if (tp->link_config.phy_is_low_power == 0) {
986 tp->link_config.phy_is_low_power = 1;
987 tp->link_config.orig_speed = tp->link_config.speed;
988 tp->link_config.orig_duplex = tp->link_config.duplex;
989 tp->link_config.orig_autoneg = tp->link_config.autoneg;
/* Copper PHYs renegotiate down to 10/half to save power. */
992 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
993 tp->link_config.speed = SPEED_10;
994 tp->link_config.duplex = DUPLEX_HALF;
995 tp->link_config.autoneg = AUTONEG_ENABLE;
996 tg3_setup_phy(tp, 0);
999 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
/* Wake-on-LAN: keep the MAC alive in MII (or TBI) mode. */
1001 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1004 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1005 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1008 mac_mode = MAC_MODE_PORT_MODE_MII;
1010 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1011 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1012 mac_mode |= MAC_MODE_LINK_POLARITY;
1014 mac_mode = MAC_MODE_PORT_MODE_TBI;
1017 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1018 tw32(MAC_LED_CTRL, tp->led_ctrl);
1020 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1021 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1022 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1024 tw32_f(MAC_MODE, mac_mode);
1027 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Pick clock-control settings appropriate for the chip generation. */
1031 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1032 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1036 base_val = tp->pci_clock_ctrl;
1037 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1038 CLOCK_CTRL_TXCLK_DISABLE);
1040 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1042 CLOCK_CTRL_PWRDOWN_PLL133);
/* NOTE(review): comparing GET_ASIC_REV() against the decimal literal
 * 5750 looks wrong — every other comparison in this file uses the
 * ASIC_REV_5750 constant, so this ASF test likely never matches.
 * Verify against the register definitions and fix.
 */
1044 } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == 5750) &&
1045 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1046 u32 newbits1, newbits2;
1048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1050 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1051 CLOCK_CTRL_TXCLK_DISABLE |
1053 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1054 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1056 newbits1 = CLOCK_CTRL_625_CORE;
1057 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1059 newbits1 = CLOCK_CTRL_ALTCLK;
1060 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1063 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1066 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1069 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1075 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1076 CLOCK_CTRL_TXCLK_DISABLE |
1077 CLOCK_CTRL_44MHZ_CORE);
1079 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1082 tw32_f(TG3PCI_CLOCK_CTRL,
1083 tp->pci_clock_ctrl | newbits3);
1088 tg3_frob_aux_power(tp);
1090 /* Finally, set the new power state. */
1091 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1093 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Log the current link state: down, or up with speed/duplex and the
 * negotiated TX/RX flow-control settings.
 */
1098 static void tg3_link_report(struct tg3 *tp)
1100 if (!netif_carrier_ok(tp->dev)) {
1101 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1103 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1105 (tp->link_config.active_speed == SPEED_1000 ?
1107 (tp->link_config.active_speed == SPEED_100 ?
1109 (tp->link_config.active_duplex == DUPLEX_FULL ?
1112 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1115 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1116 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve IEEE 802.3 pause autonegotiation: combine our advertised pause
 * bits (local_adv) with the link partner's (remote_adv) into the
 * TG3_FLAG_RX_PAUSE / TG3_FLAG_TX_PAUSE flags, then update the MAC RX/TX
 * mode registers only if the resolved mode actually changed.
 */
1120 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1122 u32 new_tg3_flags = 0;
1123 u32 old_rx_mode = tp->rx_mode;
1124 u32 old_tx_mode = tp->tx_mode;
1126 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
/* Standard pause resolution matrix (symmetric vs. asymmetric). */
1127 if (local_adv & ADVERTISE_PAUSE_CAP) {
1128 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1129 if (remote_adv & LPA_PAUSE_CAP)
1131 (TG3_FLAG_RX_PAUSE |
1133 else if (remote_adv & LPA_PAUSE_ASYM)
1135 (TG3_FLAG_RX_PAUSE);
1137 if (remote_adv & LPA_PAUSE_CAP)
1139 (TG3_FLAG_RX_PAUSE |
1142 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1143 if ((remote_adv & LPA_PAUSE_CAP) &&
1144 (remote_adv & LPA_PAUSE_ASYM))
1145 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1148 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1149 tp->tg3_flags |= new_tg3_flags;
/* Autoneg of pause disabled: keep whatever flags are already set. */
1151 new_tg3_flags = tp->tg3_flags;
1154 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1155 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1157 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1159 if (old_rx_mode != tp->rx_mode) {
1160 tw32_f(MAC_RX_MODE, tp->rx_mode);
1163 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1164 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1166 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1168 if (old_tx_mode != tp->tx_mode) {
1169 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Decode the PHY AUX status speed/duplex field into SPEED_*/DUPLEX_*
 * values; unknown encodings yield SPEED_INVALID/DUPLEX_INVALID.
 * (The SPEED_10/SPEED_100 assignments for the first four cases sit on
 * lines elided from this excerpt.)
 */
1173 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1175 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1176 case MII_TG3_AUX_STAT_10HALF:
1178 *duplex = DUPLEX_HALF;
1181 case MII_TG3_AUX_STAT_10FULL:
1183 *duplex = DUPLEX_FULL;
1186 case MII_TG3_AUX_STAT_100HALF:
1188 *duplex = DUPLEX_HALF;
1191 case MII_TG3_AUX_STAT_100FULL:
1193 *duplex = DUPLEX_FULL;
1196 case MII_TG3_AUX_STAT_1000HALF:
1197 *speed = SPEED_1000;
1198 *duplex = DUPLEX_HALF;
1201 case MII_TG3_AUX_STAT_1000FULL:
1202 *speed = SPEED_1000;
1203 *duplex = DUPLEX_FULL;
1207 *speed = SPEED_INVALID;
1208 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's advertisement registers for the configured
 * link mode: low-power (10/100 only), full autoneg, or a single forced
 * speed/duplex.  For forced modes, the PHY is first put in loopback
 * until link drops, then the final BMCR is written; otherwise autoneg
 * is (re)started.  NOTE(review): several brace/else/delay lines are
 * elided from this excerpt.
 */
1213 static int tg3_phy_copper_begin(struct tg3 *tp)
1218 if (tp->link_config.phy_is_low_power) {
1219 /* Entering low power mode. Disable gigabit and
1220 * 100baseT advertisements.
1222 tg3_writephy(tp, MII_TG3_CTRL, 0);
1224 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1225 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1226 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1227 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1229 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1230 } else if (tp->link_config.speed == SPEED_INVALID) {
/* No specific speed requested: advertise everything supported. */
1231 tp->link_config.advertising =
1232 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1233 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1234 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1235 ADVERTISED_Autoneg | ADVERTISED_MII);
1237 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1238 tp->link_config.advertising &=
1239 ~(ADVERTISED_1000baseT_Half |
1240 ADVERTISED_1000baseT_Full);
1242 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1243 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1244 new_adv |= ADVERTISE_10HALF;
1245 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1246 new_adv |= ADVERTISE_10FULL;
1247 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1248 new_adv |= ADVERTISE_100HALF;
1249 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1250 new_adv |= ADVERTISE_100FULL;
1251 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Gigabit advertisement lives in MII_TG3_CTRL; early 5701 revs
 * must also force master mode per errata.
 */
1253 if (tp->link_config.advertising &
1254 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1256 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1257 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1258 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1259 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1260 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1261 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1262 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1263 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1264 MII_TG3_CTRL_ENABLE_AS_MASTER);
1265 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1267 tg3_writephy(tp, MII_TG3_CTRL, 0);
1270 /* Asking for a specific link mode. */
1271 if (tp->link_config.speed == SPEED_1000) {
1272 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1273 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1275 if (tp->link_config.duplex == DUPLEX_FULL)
1276 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1278 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1279 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1280 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1281 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1282 MII_TG3_CTRL_ENABLE_AS_MASTER);
1283 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1285 tg3_writephy(tp, MII_TG3_CTRL, 0);
1287 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1288 if (tp->link_config.speed == SPEED_100) {
1289 if (tp->link_config.duplex == DUPLEX_FULL)
1290 new_adv |= ADVERTISE_100FULL;
1292 new_adv |= ADVERTISE_100HALF;
1294 if (tp->link_config.duplex == DUPLEX_FULL)
1295 new_adv |= ADVERTISE_10FULL;
1297 new_adv |= ADVERTISE_10HALF;
1299 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Forced mode: build the BMCR and, if it changes, bounce the link
 * through loopback so the partner sees the transition.
 */
1303 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1304 tp->link_config.speed != SPEED_INVALID) {
1305 u32 bmcr, orig_bmcr;
1307 tp->link_config.active_speed = tp->link_config.speed;
1308 tp->link_config.active_duplex = tp->link_config.duplex;
1311 switch (tp->link_config.speed) {
1317 bmcr |= BMCR_SPEED100;
1321 bmcr |= TG3_BMCR_SPEED1000;
1325 if (tp->link_config.duplex == DUPLEX_FULL)
1326 bmcr |= BMCR_FULLDPLX;
1328 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1329 if (bmcr != orig_bmcr) {
1330 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1331 for (i = 0; i < 1500; i++) {
/* BMSR latches link-down; read twice for current state. */
1335 tg3_readphy(tp, MII_BMSR, &tmp);
1336 tg3_readphy(tp, MII_BMSR, &tmp);
1337 if (!(tmp & BMSR_LSTATUS)) {
1342 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg requested (or no forced speed): restart negotiation. */
1346 tg3_writephy(tp, MII_BMCR,
1347 BMCR_ANENABLE | BMCR_ANRESTART);
/* Apply the BCM5401 DSP initialization sequence via the indirect
 * DSP address/data registers: disable tap power management and set
 * the extended-packet-length bit. Errors from each write are OR'd
 * together; returns the accumulated status (return not visible in
 * this chunk).
 */
1353 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1357 /* Turn off tap power management. */
1358 /* Set Extended packet length bit */
1359 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1361 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1362 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1364 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1365 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1367 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1368 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1370 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1371 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1373 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1374 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* Check whether the PHY is currently advertising every 10/100 mode
 * (and, unless the device is 10/100-only, both gigabit modes too).
 * Used to decide if autoneg must be restarted after exiting low-power
 * mode. Returns nonzero when all expected bits are set (the return
 * statements are elided from this chunk).
 */
1381 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1383 u32 adv_reg, all_mask;
1385 tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1386 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1387 ADVERTISE_100HALF | ADVERTISE_100FULL);
1388 if ((adv_reg & all_mask) != all_mask)
1390 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
/* Gigabit advertisement lives in the 1000BASE-T control reg. */
1393 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1394 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1395 MII_TG3_CTRL_ADV_1000_FULL);
1396 if ((tg3_ctrl & all_mask) != all_mask)
/* Full link-state evaluation and MAC programming for copper PHYs:
 * applies chip-specific PHY workarounds, polls BMSR/aux-status for
 * the negotiated speed and duplex, configures flow control, sets the
 * MAC port mode/duplex/polarity bits, and reports carrier changes.
 * NOTE(review): interior lines (local declarations, else arms, some
 * returns) are elided in this chunk; code left byte-identical.
 */
1402 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1404 int current_link_up;
/* Clear any latched MAC status change bits before we start. */
1413 (MAC_STATUS_SYNC_CHANGED |
1414 MAC_STATUS_CFG_CHANGED |
1415 MAC_STATUS_MI_COMPLETION |
1416 MAC_STATUS_LNKSTATE_CHANGED));
1419 tp->mi_mode = MAC_MI_MODE_BASE;
1420 tw32_f(MAC_MI_MODE, tp->mi_mode);
1423 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1425 /* Some third-party PHYs need to be reset on link going
1428 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1431 netif_carrier_ok(tp->dev)) {
/* BMSR link status is latched-low; read twice for current state. */
1432 tg3_readphy(tp, MII_BMSR, &bmsr);
1433 tg3_readphy(tp, MII_BMSR, &bmsr);
1434 if (!(bmsr & BMSR_LSTATUS))
1440 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1441 tg3_readphy(tp, MII_BMSR, &bmsr);
1442 tg3_readphy(tp, MII_BMSR, &bmsr);
1444 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1447 if (!(bmsr & BMSR_LSTATUS)) {
1448 err = tg3_init_5401phy_dsp(tp);
1452 tg3_readphy(tp, MII_BMSR, &bmsr);
1453 for (i = 0; i < 1000; i++) {
1455 tg3_readphy(tp, MII_BMSR, &bmsr);
1456 if (bmsr & BMSR_LSTATUS) {
/* BCM5401 rev B0 at gigabit needs the DSP sequence redone
 * after a PHY reset when link did not come back. */
1462 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1463 !(bmsr & BMSR_LSTATUS) &&
1464 tp->link_config.active_speed == SPEED_1000) {
1465 err = tg3_phy_reset(tp);
1467 err = tg3_init_5401phy_dsp(tp);
1472 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1473 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1474 /* 5701 {A0,B0} CRC bug workaround */
1475 tg3_writephy(tp, 0x15, 0x0a75);
1476 tg3_writephy(tp, 0x1c, 0x8c68);
1477 tg3_writephy(tp, 0x1c, 0x8d68);
1478 tg3_writephy(tp, 0x1c, 0x8c68);
1481 /* Clear pending interrupts... */
1482 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1483 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1485 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1486 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
1488 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1492 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1493 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1494 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1496 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1499 current_link_up = 0;
1500 current_speed = SPEED_INVALID;
1501 current_duplex = DUPLEX_INVALID;
1503 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1506 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1507 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1508 if (!(val & (1 << 10))) {
1510 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll up to 100 times for link; again double-read the latched BMSR. */
1516 for (i = 0; i < 100; i++) {
1517 tg3_readphy(tp, MII_BMSR, &bmsr);
1518 tg3_readphy(tp, MII_BMSR, &bmsr);
1519 if (bmsr & BMSR_LSTATUS)
1524 if (bmsr & BMSR_LSTATUS) {
1527 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1528 for (i = 0; i < 2000; i++) {
1530 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1535 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Retry BMCR reads until a sane (non-zero, non-0x7fff) value. */
1540 for (i = 0; i < 200; i++) {
1541 tg3_readphy(tp, MII_BMCR, &bmcr);
1542 tg3_readphy(tp, MII_BMCR, &bmcr);
1543 if (bmcr && bmcr != 0x7fff)
1548 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1549 if (bmcr & BMCR_ANENABLE) {
1550 current_link_up = 1;
1552 /* Force autoneg restart if we are exiting
1555 if (!tg3_copper_is_advertising_all(tp))
1556 current_link_up = 0;
1558 current_link_up = 0;
1561 if (!(bmcr & BMCR_ANENABLE) &&
1562 tp->link_config.speed == current_speed &&
1563 tp->link_config.duplex == current_duplex) {
1564 current_link_up = 1;
1566 current_link_up = 0;
1570 tp->link_config.active_speed = current_speed;
1571 tp->link_config.active_duplex = current_duplex;
/* On an autonegotiated full-duplex link, require full pause
 * advertisement locally; otherwise drop the link and reconfigure. */
1574 if (current_link_up == 1 &&
1575 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1576 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1577 u32 local_adv, remote_adv;
1579 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1580 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1582 tg3_readphy(tp, MII_LPA, &remote_adv);
1583 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1585 /* If we are not advertising full pause capability,
1586 * something is wrong. Bring the link down and reconfigure.
1588 if (local_adv != ADVERTISE_PAUSE_CAP) {
1589 current_link_up = 0;
1591 tg3_setup_flow_control(tp, local_adv, remote_adv);
1595 if (current_link_up == 0) {
1598 tg3_phy_copper_begin(tp);
1600 tg3_readphy(tp, MII_BMSR, &tmp);
1601 tg3_readphy(tp, MII_BMSR, &tmp);
1602 if (tmp & BMSR_LSTATUS)
1603 current_link_up = 1;
/* Program MAC port mode (MII for 10/100, GMII otherwise) and duplex. */
1606 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1607 if (current_link_up == 1) {
1608 if (tp->link_config.active_speed == SPEED_100 ||
1609 tp->link_config.active_speed == SPEED_10)
1610 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1612 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1614 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1616 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1617 if (tp->link_config.active_duplex == DUPLEX_HALF)
1618 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1620 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1622 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1623 (current_link_up == 1 &&
1624 tp->link_config.active_speed == SPEED_10))
1625 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1627 if (current_link_up == 1)
1628 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1631 /* ??? Without this setting Netgear GA302T PHY does not
1632 * ??? send/receive packets...
1634 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1635 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1636 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1637 tw32_f(MAC_MI_MODE, tp->mi_mode);
1641 tw32_f(MAC_MODE, tp->mac_mode);
1644 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1645 /* Polled via timer. */
1646 tw32_f(MAC_EVENT, 0);
1648 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X/high-speed PCI: notify firmware via
 * the NIC SRAM mailbox (workaround; see MAGIC2 value). */
1652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1653 current_link_up == 1 &&
1654 tp->link_config.active_speed == SPEED_1000 &&
1655 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1656 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1659 (MAC_STATUS_SYNC_CHANGED |
1660 MAC_STATUS_CFG_CHANGED));
1663 NIC_SRAM_FIRMWARE_MBOX,
1664 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1667 if (current_link_up != netif_carrier_ok(tp->dev)) {
1668 if (current_link_up)
1669 netif_carrier_on(tp->dev);
1671 netif_carrier_off(tp->dev);
1672 tg3_link_report(tp);
/* State for the software IEEE 802.3 Clause 37 style autonegotiation
 * state machine used on fiber (TBI) links, plus its state, flag and
 * received-config-word bit definitions. Consumed by
 * tg3_fiber_aneg_smachine() and fiber_autoneg().
 */
1678 struct tg3_fiber_aneginfo {
/* Autoneg state-machine states. */
1680 #define ANEG_STATE_UNKNOWN 0
1681 #define ANEG_STATE_AN_ENABLE 1
1682 #define ANEG_STATE_RESTART_INIT 2
1683 #define ANEG_STATE_RESTART 3
1684 #define ANEG_STATE_DISABLE_LINK_OK 4
1685 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1686 #define ANEG_STATE_ABILITY_DETECT 6
1687 #define ANEG_STATE_ACK_DETECT_INIT 7
1688 #define ANEG_STATE_ACK_DETECT 8
1689 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1690 #define ANEG_STATE_COMPLETE_ACK 10
1691 #define ANEG_STATE_IDLE_DETECT_INIT 11
1692 #define ANEG_STATE_IDLE_DETECT 12
1693 #define ANEG_STATE_LINK_OK 13
1694 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1695 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Bits for the 'flags' field: MR_* mirror the management-register
 * style variables of the 802.3 autoneg specification. */
1698 #define MR_AN_ENABLE 0x00000001
1699 #define MR_RESTART_AN 0x00000002
1700 #define MR_AN_COMPLETE 0x00000004
1701 #define MR_PAGE_RX 0x00000008
1702 #define MR_NP_LOADED 0x00000010
1703 #define MR_TOGGLE_TX 0x00000020
1704 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1705 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1706 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1707 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1708 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1709 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1710 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1711 #define MR_TOGGLE_RX 0x00002000
1712 #define MR_NP_RX 0x00004000
1714 #define MR_LINK_OK 0x80000000
1716 unsigned long link_time, cur_time;
1718 u32 ability_match_cfg;
1719 int ability_match_count;
1721 char ability_match, idle_match, ack_match;
/* Tx/rx autoneg configuration words and their bit layout. */
1723 u32 txconfig, rxconfig;
1724 #define ANEG_CFG_NP 0x00000080
1725 #define ANEG_CFG_ACK 0x00000040
1726 #define ANEG_CFG_RF2 0x00000020
1727 #define ANEG_CFG_RF1 0x00000010
1728 #define ANEG_CFG_PS2 0x00000001
1729 #define ANEG_CFG_PS1 0x00008000
1730 #define ANEG_CFG_HD 0x00004000
1731 #define ANEG_CFG_FD 0x00002000
1732 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes of the state machine and the settle-time bound. */
1737 #define ANEG_TIMER_ENAB 2
1738 #define ANEG_FAILED -1
1740 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software fiber autonegotiation state machine.
 * Samples the received config word from the MAC, updates the
 * ability/ack match trackers, then advances ap->state. Returns one
 * of the ANEG_* codes (ANEG_TIMER_ENAB asks the caller to keep
 * ticking). NOTE(review): break statements and some returns are
 * elided in this chunk; code left byte-identical.
 */
1742 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1743 struct tg3_fiber_aneginfo *ap)
1745 unsigned long delta;
1749 if (ap->state == ANEG_STATE_UNKNOWN) {
1753 ap->ability_match_cfg = 0;
1754 ap->ability_match_count = 0;
1755 ap->ability_match = 0;
/* Latch the incoming config word; ability match requires the
 * same non-zero word to be seen more than once. */
1761 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1762 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1764 if (rx_cfg_reg != ap->ability_match_cfg) {
1765 ap->ability_match_cfg = rx_cfg_reg;
1766 ap->ability_match = 0;
1767 ap->ability_match_count = 0;
1769 if (++ap->ability_match_count > 1) {
1770 ap->ability_match = 1;
1771 ap->ability_match_cfg = rx_cfg_reg;
1774 if (rx_cfg_reg & ANEG_CFG_ACK)
1782 ap->ability_match_cfg = 0;
1783 ap->ability_match_count = 0;
1784 ap->ability_match = 0;
1790 ap->rxconfig = rx_cfg_reg;
1794 case ANEG_STATE_UNKNOWN:
1795 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1796 ap->state = ANEG_STATE_AN_ENABLE;
1799 case ANEG_STATE_AN_ENABLE:
1800 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1801 if (ap->flags & MR_AN_ENABLE) {
1804 ap->ability_match_cfg = 0;
1805 ap->ability_match_count = 0;
1806 ap->ability_match = 0;
1810 ap->state = ANEG_STATE_RESTART_INIT;
1812 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1816 case ANEG_STATE_RESTART_INIT:
1817 ap->link_time = ap->cur_time;
1818 ap->flags &= ~(MR_NP_LOADED);
/* Send an all-zero config word while restarting. */
1820 tw32(MAC_TX_AUTO_NEG, 0);
1821 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1822 tw32_f(MAC_MODE, tp->mac_mode);
1825 ret = ANEG_TIMER_ENAB;
1826 ap->state = ANEG_STATE_RESTART;
1829 case ANEG_STATE_RESTART:
1830 delta = ap->cur_time - ap->link_time;
1831 if (delta > ANEG_STATE_SETTLE_TIME) {
1832 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1834 ret = ANEG_TIMER_ENAB;
1838 case ANEG_STATE_DISABLE_LINK_OK:
1842 case ANEG_STATE_ABILITY_DETECT_INIT:
1843 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full duplex + symmetric pause in our tx config word. */
1844 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1845 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1846 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1847 tw32_f(MAC_MODE, tp->mac_mode);
1850 ap->state = ANEG_STATE_ABILITY_DETECT;
1853 case ANEG_STATE_ABILITY_DETECT:
1854 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1855 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1859 case ANEG_STATE_ACK_DETECT_INIT:
1860 ap->txconfig |= ANEG_CFG_ACK;
1861 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1862 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1863 tw32_f(MAC_MODE, tp->mac_mode);
1866 ap->state = ANEG_STATE_ACK_DETECT;
1869 case ANEG_STATE_ACK_DETECT:
1870 if (ap->ack_match != 0) {
1871 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1872 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1873 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1875 ap->state = ANEG_STATE_AN_ENABLE;
1877 } else if (ap->ability_match != 0 &&
1878 ap->rxconfig == 0) {
1879 ap->state = ANEG_STATE_AN_ENABLE;
1883 case ANEG_STATE_COMPLETE_ACK_INIT:
1884 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the link partner's advertised abilities into MR_* flags. */
1888 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1889 MR_LP_ADV_HALF_DUPLEX |
1890 MR_LP_ADV_SYM_PAUSE |
1891 MR_LP_ADV_ASYM_PAUSE |
1892 MR_LP_ADV_REMOTE_FAULT1 |
1893 MR_LP_ADV_REMOTE_FAULT2 |
1894 MR_LP_ADV_NEXT_PAGE |
1897 if (ap->rxconfig & ANEG_CFG_FD)
1898 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1899 if (ap->rxconfig & ANEG_CFG_HD)
1900 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1901 if (ap->rxconfig & ANEG_CFG_PS1)
1902 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1903 if (ap->rxconfig & ANEG_CFG_PS2)
1904 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1905 if (ap->rxconfig & ANEG_CFG_RF1)
1906 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1907 if (ap->rxconfig & ANEG_CFG_RF2)
1908 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1909 if (ap->rxconfig & ANEG_CFG_NP)
1910 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1912 ap->link_time = ap->cur_time;
1914 ap->flags ^= (MR_TOGGLE_TX);
1915 if (ap->rxconfig & 0x0008)
1916 ap->flags |= MR_TOGGLE_RX;
1917 if (ap->rxconfig & ANEG_CFG_NP)
1918 ap->flags |= MR_NP_RX;
1919 ap->flags |= MR_PAGE_RX;
1921 ap->state = ANEG_STATE_COMPLETE_ACK;
1922 ret = ANEG_TIMER_ENAB;
1925 case ANEG_STATE_COMPLETE_ACK:
1926 if (ap->ability_match != 0 &&
1927 ap->rxconfig == 0) {
1928 ap->state = ANEG_STATE_AN_ENABLE;
1931 delta = ap->cur_time - ap->link_time;
1932 if (delta > ANEG_STATE_SETTLE_TIME) {
1933 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1934 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1936 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1937 !(ap->flags & MR_NP_RX)) {
1938 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1946 case ANEG_STATE_IDLE_DETECT_INIT:
1947 ap->link_time = ap->cur_time;
/* Stop sending config words; switch to idle. */
1948 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1949 tw32_f(MAC_MODE, tp->mac_mode);
1952 ap->state = ANEG_STATE_IDLE_DETECT;
1953 ret = ANEG_TIMER_ENAB;
1956 case ANEG_STATE_IDLE_DETECT:
1957 if (ap->ability_match != 0 &&
1958 ap->rxconfig == 0) {
1959 ap->state = ANEG_STATE_AN_ENABLE;
1962 delta = ap->cur_time - ap->link_time;
1963 if (delta > ANEG_STATE_SETTLE_TIME) {
1964 /* XXX another gem from the Broadcom driver :( */
1965 ap->state = ANEG_STATE_LINK_OK;
1969 case ANEG_STATE_LINK_OK:
1970 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1974 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1975 /* ??? unimplemented */
1978 case ANEG_STATE_NEXT_PAGE_WAIT:
1979 /* ??? unimplemented */
/* Run the software fiber autoneg state machine to completion by
 * ticking it up to ~195000 times, then return the resulting MR_*
 * flags through *flags. Succeeds only when the machine reports
 * ANEG_DONE with link OK and a full-duplex link partner. The final
 * return is elided in this chunk.
 */
1990 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
1993 struct tg3_fiber_aneginfo aninfo;
1994 int status = ANEG_FAILED;
1998 tw32_f(MAC_TX_AUTO_NEG, 0);
2000 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2001 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2004 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2007 memset(&aninfo, 0, sizeof(aninfo));
2008 aninfo.flags |= MR_AN_ENABLE;
2009 aninfo.state = ANEG_STATE_UNKNOWN;
2010 aninfo.cur_time = 0;
/* Bounded busy-loop; each iteration is one state-machine step. */
2012 while (++tick < 195000) {
2013 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2014 if (status == ANEG_DONE || status == ANEG_FAILED)
2020 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2021 tw32_f(MAC_MODE, tp->mac_mode);
2024 *flags = aninfo.flags;
2026 if (status == ANEG_DONE &&
2027 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2028 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the BCM8002 SerDes PHY: reset, set the PLL lock range,
 * configure auto-lock/comdet, pulse POR, and finally deselect the
 * channel register so the PHY ID can be read. Register numbers and
 * values here are raw BCM8002-specific magic from the vendor driver.
 */
2034 static void tg3_init_bcm8002(struct tg3 *tp)
2036 u32 mac_status = tr32(MAC_STATUS);
2039 /* Reset when initting first time or we have a link. */
2040 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2041 !(mac_status & MAC_STATUS_PCS_SYNCED))
2044 /* Set PLL lock range. */
2045 tg3_writephy(tp, 0x16, 0x8007);
2048 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2050 /* Wait for reset to complete. */
2051 /* XXX schedule_timeout() ... */
2052 for (i = 0; i < 500; i++)
2055 /* Config mode; select PMA/Ch 1 regs. */
2056 tg3_writephy(tp, 0x10, 0x8411);
2058 /* Enable auto-lock and comdet, select txclk for tx. */
2059 tg3_writephy(tp, 0x11, 0x0a10);
2061 tg3_writephy(tp, 0x18, 0x00a0);
2062 tg3_writephy(tp, 0x16, 0x41ff);
2064 /* Assert and deassert POR. */
2065 tg3_writephy(tp, 0x13, 0x0400);
2067 tg3_writephy(tp, 0x13, 0x0000);
2069 tg3_writephy(tp, 0x11, 0x0a50);
2071 tg3_writephy(tp, 0x11, 0x0a10);
2073 /* Wait for signal to stabilize */
2074 /* XXX schedule_timeout() ... */
2075 for (i = 0; i < 15000; i++)
2078 /* Deselect the channel register so we can read the PHYID
2081 tg3_writephy(tp, 0x10, 0x8011);
/* Drive the hardware SG-DIG autonegotiation block for fiber links.
 * Programs SG_DIG_CTRL (with a SERDES_CFG workaround on some 5704
 * revisions), waits ~200ms for the negotiation result, and derives
 * flow-control settings from the negotiated pause bits. Returns 1
 * when link is up, 0 otherwise.
 */
2084 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2086 u32 sg_dig_ctrl, sg_dig_status;
2087 u32 serdes_cfg, expected_sg_dig_ctrl;
2088 int workaround, port_a;
2089 int current_link_up;
2092 expected_sg_dig_ctrl = 0;
2095 current_link_up = 0;
/* The SERDES_CFG workaround applies to chips other than 5704 A0/A1. */
2097 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2098 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2100 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2103 serdes_cfg = tr32(MAC_SERDES_CFG) &
2104 ((1 << 23) | (1 << 22) | (1 << 21) | (1 << 20));
2107 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2109 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2110 if (sg_dig_ctrl & (1 << 31)) {
2112 u32 val = serdes_cfg;
2118 tw32_f(MAC_SERDES_CFG, val);
2120 tw32_f(SG_DIG_CTRL, 0x01388400);
2122 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2123 tg3_setup_flow_control(tp, 0, 0);
2124 current_link_up = 1;
2129 /* Want auto-negotiation. */
2130 expected_sg_dig_ctrl = 0x81388400;
2132 /* Pause capability */
2133 expected_sg_dig_ctrl |= (1 << 11);
2135 /* Asymmetric pause */
2136 expected_sg_dig_ctrl |= (1 << 12);
2138 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2140 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011880);
2141 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2143 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2145 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2146 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2147 MAC_STATUS_SIGNAL_DET)) {
2150 /* Give time to negotiate (~200ms) */
2151 for (i = 0; i < 40000; i++) {
2152 sg_dig_status = tr32(SG_DIG_STATUS);
2153 if (sg_dig_status & (0x3))
2157 mac_status = tr32(MAC_STATUS);
/* Bit 1 = autoneg complete; bits 19/20 = partner pause bits. */
2159 if ((sg_dig_status & (1 << 1)) &&
2160 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2161 u32 local_adv, remote_adv;
2163 local_adv = ADVERTISE_PAUSE_CAP;
2165 if (sg_dig_status & (1 << 19))
2166 remote_adv |= LPA_PAUSE_CAP;
2167 if (sg_dig_status & (1 << 20))
2168 remote_adv |= LPA_PAUSE_ASYM;
2170 tg3_setup_flow_control(tp, local_adv, remote_adv);
2171 current_link_up = 1;
2172 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2173 } else if (!(sg_dig_status & (1 << 1))) {
2174 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2175 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2178 u32 val = serdes_cfg;
2185 tw32_f(MAC_SERDES_CFG, val);
2188 tw32_f(SG_DIG_CTRL, 0x01388400);
2191 mac_status = tr32(MAC_STATUS);
2192 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2193 tg3_setup_flow_control(tp, 0, 0);
2194 current_link_up = 1;
2201 return current_link_up;
/* Software fallback for fiber link setup when the hardware SG-DIG
 * autoneg block is not used: either run the software autoneg state
 * machine (fiber_autoneg) or force a 1000FD link, then settle the
 * latched MAC status bits. Returns 1 when link is up, 0 otherwise.
 */
2204 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2206 int current_link_up = 0;
2208 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2209 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2213 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2217 if (fiber_autoneg(tp, &flags)) {
2218 u32 local_adv, remote_adv;
2220 local_adv = ADVERTISE_PAUSE_CAP;
2222 if (flags & MR_LP_ADV_SYM_PAUSE)
2223 remote_adv |= LPA_PAUSE_CAP;
2224 if (flags & MR_LP_ADV_ASYM_PAUSE)
2225 remote_adv |= LPA_PAUSE_ASYM;
2227 tg3_setup_flow_control(tp, local_adv, remote_adv);
2229 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2230 current_link_up = 1;
/* Clear latched sync/config-change bits until they stay clear. */
2232 for (i = 0; i < 30; i++) {
2235 (MAC_STATUS_SYNC_CHANGED |
2236 MAC_STATUS_CFG_CHANGED));
2238 if ((tr32(MAC_STATUS) &
2239 (MAC_STATUS_SYNC_CHANGED |
2240 MAC_STATUS_CFG_CHANGED)) == 0)
2244 mac_status = tr32(MAC_STATUS);
2245 if (current_link_up == 0 &&
2246 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2247 !(mac_status & MAC_STATUS_RCVD_CFG))
2248 current_link_up = 1;
2250 /* Forcing 1000FD link up. */
2251 current_link_up = 1;
2252 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2254 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2259 return current_link_up;
/* Top-level link setup for fiber (TBI) ports: put the MAC in TBI
 * mode, run either the hardware SG-DIG or the software autoneg path,
 * program the link LEDs, and report carrier / pause-config changes.
 * NOTE(review): local declarations and the early-return path around
 * original line 2287 are elided; code left byte-identical.
 */
2262 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2265 u16 orig_active_speed;
2266 u8 orig_active_duplex;
2268 int current_link_up;
/* Remember pause/speed/duplex so we can report only real changes. */
2272 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2273 TG3_FLAG_TX_PAUSE));
2274 orig_active_speed = tp->link_config.active_speed;
2275 orig_active_duplex = tp->link_config.active_duplex;
2277 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2278 netif_carrier_ok(tp->dev) &&
2279 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2280 mac_status = tr32(MAC_STATUS);
2281 mac_status &= (MAC_STATUS_PCS_SYNCED |
2282 MAC_STATUS_SIGNAL_DET |
2283 MAC_STATUS_CFG_CHANGED |
2284 MAC_STATUS_RCVD_CFG);
2285 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2286 MAC_STATUS_SIGNAL_DET)) {
2287 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2288 MAC_STATUS_CFG_CHANGED));
2293 tw32_f(MAC_TX_AUTO_NEG, 0);
2295 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2296 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2297 tw32_f(MAC_MODE, tp->mac_mode);
2300 if (tp->phy_id == PHY_ID_BCM8002)
2301 tg3_init_bcm8002(tp);
2303 /* Enable link change event even when serdes polling. */
2304 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2307 current_link_up = 0;
2308 mac_status = tr32(MAC_STATUS);
2310 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2311 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2313 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2315 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2316 tw32_f(MAC_MODE, tp->mac_mode);
2319 tp->hw_status->status =
2320 (SD_STATUS_UPDATED |
2321 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
/* Flush latched sync/config-change bits (bounded retry loop). */
2323 for (i = 0; i < 100; i++) {
2324 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2325 MAC_STATUS_CFG_CHANGED));
2327 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2328 MAC_STATUS_CFG_CHANGED)) == 0)
2332 mac_status = tr32(MAC_STATUS);
2333 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2334 current_link_up = 0;
2335 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2336 tw32_f(MAC_MODE, (tp->mac_mode |
2337 MAC_MODE_SEND_CONFIGS));
2339 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber links are always 1000 Mb/s full duplex when up. */
2343 if (current_link_up == 1) {
2344 tp->link_config.active_speed = SPEED_1000;
2345 tp->link_config.active_duplex = DUPLEX_FULL;
2346 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2347 LED_CTRL_LNKLED_OVERRIDE |
2348 LED_CTRL_1000MBPS_ON));
2350 tp->link_config.active_speed = SPEED_INVALID;
2351 tp->link_config.active_duplex = DUPLEX_INVALID;
2352 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2353 LED_CTRL_LNKLED_OVERRIDE |
2354 LED_CTRL_TRAFFIC_OVERRIDE));
2357 if (current_link_up != netif_carrier_ok(tp->dev)) {
2358 if (current_link_up)
2359 netif_carrier_on(tp->dev);
2361 netif_carrier_off(tp->dev);
2362 tg3_link_report(tp);
/* Also report when pause config or speed/duplex changed without a
 * carrier transition. */
2365 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2367 if (orig_pause_cfg != now_pause_cfg ||
2368 orig_active_speed != tp->link_config.active_speed ||
2369 orig_active_duplex != tp->link_config.active_duplex)
2370 tg3_link_report(tp);
/* Dispatch link setup to the fiber or copper path, then program the
 * MAC TX slot time (shorter for half-duplex gigabit) and, on chips
 * that support it, the statistics coalescing tick rate based on
 * carrier state. Returns the error from the chosen setup path.
 */
2376 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2380 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2381 err = tg3_setup_fiber_phy(tp, force_reset);
2383 err = tg3_setup_copper_phy(tp, force_reset);
/* Half-duplex gigabit needs the larger 0xff slot time. */
2386 if (tp->link_config.active_speed == SPEED_1000 &&
2387 tp->link_config.active_duplex == DUPLEX_HALF)
2388 tw32(MAC_TX_LENGTHS,
2389 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2390 (6 << TX_LENGTHS_IPG_SHIFT) |
2391 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2393 tw32(MAC_TX_LENGTHS,
2394 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2395 (6 << TX_LENGTHS_IPG_SHIFT) |
2396 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2398 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2399 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2400 if (netif_carrier_ok(tp->dev)) {
2401 tw32(HOSTCC_STAT_COAL_TICKS,
2402 DEFAULT_STAT_COAL_TICKS);
2404 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2411 /* Tigon3 never reports partial packet sends. So we do not
2412 * need special logic to handle SKBs that have not had all
2413 * of their frags sent yet, like SunGEM does.
/* Reclaim completed TX descriptors: walk from the software consumer
 * index up to the hardware consumer index, unmapping the head buffer
 * and each fragment page, freeing the skb, and finally waking the
 * queue if enough descriptors became available. Runs in interrupt
 * context (dev_kfree_skb_irq).
 */
2415 static void tg3_tx(struct tg3 *tp)
2417 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2418 u32 sw_idx = tp->tx_cons;
2420 while (sw_idx != hw_idx) {
2421 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2422 struct sk_buff *skb = ri->skb;
2425 if (unlikely(skb == NULL))
2428 pci_unmap_single(tp->pdev,
2429 pci_unmap_addr(ri, mapping),
2435 sw_idx = NEXT_TX(sw_idx);
/* One additional descriptor per paged fragment. */
2437 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2438 if (unlikely(sw_idx == hw_idx))
2441 ri = &tp->tx_buffers[sw_idx];
2442 if (unlikely(ri->skb != NULL))
2445 pci_unmap_page(tp->pdev,
2446 pci_unmap_addr(ri, mapping),
2447 skb_shinfo(skb)->frags[i].size,
2450 sw_idx = NEXT_TX(sw_idx);
2453 dev_kfree_skb_irq(skb);
2456 tp->tx_cons = sw_idx;
2458 if (netif_queue_stopped(tp->dev) &&
2459 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2460 netif_wake_queue(tp->dev);
2463 /* Returns size of skb allocated or < 0 on error.
2465 * We only need to fill in the address because the other members
2466 * of the RX descriptor are invariant, see tg3_init_rings.
2468 * Note the purposeful asymmetry of cpu vs. chip accesses. For
2469 * posting buffers we only dirty the first cache line of the RX
2470 * descriptor (containing the address). Whereas for the RX status
2471 * buffers the cpu only reads the last cacheline of the RX descriptor
2472 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2474 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2475 int src_idx, u32 dest_idx_unmasked)
2477 struct tg3_rx_buffer_desc *desc;
2478 struct ring_info *map, *src_map;
2479 struct sk_buff *skb;
2481 int skb_size, dest_idx;
/* opaque_key selects the standard or jumbo producer ring. */
2484 switch (opaque_key) {
2485 case RXD_OPAQUE_RING_STD:
2486 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2487 desc = &tp->rx_std[dest_idx];
2488 map = &tp->rx_std_buffers[dest_idx];
2490 src_map = &tp->rx_std_buffers[src_idx];
2491 skb_size = RX_PKT_BUF_SZ;
2494 case RXD_OPAQUE_RING_JUMBO:
2495 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2496 desc = &tp->rx_jumbo[dest_idx];
2497 map = &tp->rx_jumbo_buffers[dest_idx];
2499 src_map = &tp->rx_jumbo_buffers[src_idx];
2500 skb_size = RX_JUMBO_PKT_BUF_SZ;
2507 /* Do not overwrite any of the map or rp information
2508 * until we are sure we can commit to a new buffer.
2510 * Callers depend upon this behavior and assume that
2511 * we leave everything unchanged if we fail.
2513 skb = dev_alloc_skb(skb_size);
2518 skb_reserve(skb, tp->rx_offset);
2520 mapping = pci_map_single(tp->pdev, skb->data,
2521 skb_size - tp->rx_offset,
2522 PCI_DMA_FROMDEVICE);
2525 pci_unmap_addr_set(map, mapping, mapping);
2527 if (src_map != NULL)
2528 src_map->skb = NULL;
/* Only the DMA address is written; other descriptor fields are
 * invariant (see header comment). */
2530 desc->addr_hi = ((u64)mapping >> 32);
2531 desc->addr_lo = ((u64)mapping & 0xffffffff);
2536 /* We only need to move over in the address because the other
2537 * members of the RX descriptor are invariant. See notes above
2538 * tg3_alloc_rx_skb for full details.
/* Recycle an RX buffer in place: transfer the skb, DMA mapping and
 * descriptor address from src_idx to dest_idx within the same ring
 * (standard or jumbo, chosen by opaque_key), without allocating.
 */
2540 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2541 int src_idx, u32 dest_idx_unmasked)
2543 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2544 struct ring_info *src_map, *dest_map;
2547 switch (opaque_key) {
2548 case RXD_OPAQUE_RING_STD:
2549 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2550 dest_desc = &tp->rx_std[dest_idx];
2551 dest_map = &tp->rx_std_buffers[dest_idx];
2552 src_desc = &tp->rx_std[src_idx];
2553 src_map = &tp->rx_std_buffers[src_idx];
2556 case RXD_OPAQUE_RING_JUMBO:
2557 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2558 dest_desc = &tp->rx_jumbo[dest_idx];
2559 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2560 src_desc = &tp->rx_jumbo[src_idx];
2561 src_map = &tp->rx_jumbo_buffers[src_idx];
/* Move ownership: skb pointer, unmap cookie, then the DMA address. */
2568 dest_map->skb = src_map->skb;
2569 pci_unmap_addr_set(dest_map, mapping,
2570 pci_unmap_addr(src_map, mapping));
2571 dest_desc->addr_hi = src_desc->addr_hi;
2572 dest_desc->addr_lo = src_desc->addr_lo;
2574 src_map->skb = NULL;
2577 #if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-stripped VLAN tag to the
 * VLAN-acceleration receive path. Compiled only when 802.1Q
 * support is enabled (TG3_VLAN_TAG_USED).
 */
2578 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2580 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2584 /* The RX ring scheme is composed of multiple rings which post fresh
2585 * buffers to the chip, and one special ring the chip uses to report
2586 * status back to the host.
2588 * The special ring reports the status of received packets to the
2589 * host. The chip does not write into the original descriptor the
2590 * RX buffer was obtained from. The chip simply takes the original
2591 * descriptor as provided by the host, updates the status and length
2592 * field, then writes this into the next status ring entry.
2594 * Each ring the host uses to post buffers to the chip is described
2595 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2596 * it is first placed into the on-chip ram. When the packet's length
2597 * is known, it walks down the TG3_BDINFO entries to select the ring.
2598 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2599 * which is within the range of the new packet's length is chosen.
2601 * The "separate ring for rx status" scheme may sound queer, but it makes
2602 * sense from a cache coherency perspective. If only the host writes
2603 * to the buffer post rings, and only the chip writes to the rx status
2604 * rings, then cache lines never move beyond shared-modified state.
2605 * If both the host and chip were to write into the same ring, cache line
2606 * eviction could occur since both entities want it in an exclusive state.
/* Service the RX return ring: pull up to 'budget' completed packets off
 * the status ring, hand them to the network stack, recycle or replenish
 * the posting rings, and ACK the hardware.  Returns the number of
 * packets processed.  Serialization is via NAPI (see block comment
 * above); no spinlock is taken here.
 * NOTE(review): this extract elides some original source lines (closing
 * braces, else branches, labels such as next_pkt/next_pkt_nopost), so
 * the visible control flow is incomplete.
 */
2608 static int tg3_rx(struct tg3 *tp, int budget)
2611 u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2615 hw_idx = tp->hw_status->idx[0].rx_producer;
2617 * We need to order the read of hw_idx and the read of
2618 * the opaque cookie.
2621 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
/* Walk the return ring until we catch up with the hardware producer
 * index or exhaust the NAPI budget.
 */
2624 while (sw_idx != hw_idx && budget > 0) {
2625 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2627 struct sk_buff *skb;
2628 dma_addr_t dma_addr;
2629 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie we wrote into the descriptor at post time tells
 * us which posting ring (std/jumbo) and which slot the buffer came from.
 */
2631 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2632 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2633 if (opaque_key == RXD_OPAQUE_RING_STD) {
2634 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2636 skb = tp->rx_std_buffers[desc_idx].skb;
2637 post_ptr = &tp->rx_std_ptr;
2638 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2639 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2641 skb = tp->rx_jumbo_buffers[desc_idx].skb;
2642 post_ptr = &tp->rx_jumbo_ptr;
/* Unknown ring cookie: skip without posting a replacement buffer. */
2645 goto next_pkt_nopost;
2648 work_mask |= opaque_key;
/* Drop errored frames (except the harmless odd-nibble MII case),
 * recycling the buffer back to its posting ring.
 */
2650 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2651 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2653 tg3_recycle_rx(tp, opaque_key,
2654 desc_idx, *post_ptr);
2656 /* Other statistics kept track of by card. */
2657 tp->net_stats.rx_dropped++;
2661 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
/* Large frames: give the skb to the stack and post a fresh buffer.
 * Small frames: copy into a fresh skb and recycle the original buffer,
 * avoiding the cost of a new DMA mapping.
 */
2663 if (len > RX_COPY_THRESHOLD) {
2666 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2667 desc_idx, *post_ptr);
2671 pci_unmap_single(tp->pdev, dma_addr,
2672 skb_size - tp->rx_offset,
2673 PCI_DMA_FROMDEVICE);
2677 struct sk_buff *copy_skb;
2679 tg3_recycle_rx(tp, opaque_key,
2680 desc_idx, *post_ptr);
2682 copy_skb = dev_alloc_skb(len + 2);
2683 if (copy_skb == NULL)
2684 goto drop_it_no_recycle;
2686 copy_skb->dev = tp->dev;
/* Reserve 2 bytes so the IP header lands 16-byte aligned. */
2687 skb_reserve(copy_skb, 2);
2688 skb_put(copy_skb, len);
2689 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2690 memcpy(copy_skb->data, skb->data, len);
2691 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2693 /* We'll reuse the original ring buffer. */
/* Trust the hardware TCP/UDP checksum only when enabled and the
 * computed ones-complement sum is 0xffff (i.e. valid).
 */
2697 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2698 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2699 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2700 >> RXD_TCPCSUM_SHIFT) == 0xffff))
2701 skb->ip_summed = CHECKSUM_UNNECESSARY;
2703 skb->ip_summed = CHECKSUM_NONE;
2705 skb->protocol = eth_type_trans(skb, tp->dev);
2706 #if TG3_VLAN_TAG_USED
2707 if (tp->vlgrp != NULL &&
2708 desc->type_flags & RXD_FLAG_VLAN) {
2709 tg3_vlan_rx(tp, skb,
2710 desc->err_vlan & RXD_VLAN_MASK);
2713 netif_receive_skb(skb);
2715 tp->dev->last_rx = jiffies;
2723 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2726 /* ACK the status ring. */
2727 tp->rx_rcb_ptr = rx_rcb_ptr;
2728 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2729 (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2731 /* Refill RX ring(s). */
2732 if (work_mask & RXD_OPAQUE_RING_STD) {
2733 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2734 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2737 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2738 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2739 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/* NAPI poll callback: handle link-change events, reap TX completions,
 * then run tg3_rx() within the NAPI budget.  Returns 0 when all work is
 * done (and interrupts have been re-enabled), 1 to stay on the poll list.
 */
2746 static int tg3_poll(struct net_device *netdev, int *budget)
2748 struct tg3 *tp = netdev_priv(netdev);
2749 struct tg3_hw_status *sblk = tp->hw_status;
2750 unsigned long flags;
2753 spin_lock_irqsave(&tp->lock, flags);
2755 /* handle link change and other phy events */
2756 if (!(tp->tg3_flags &
2757 (TG3_FLAG_USE_LINKCHG_REG |
2758 TG3_FLAG_POLL_SERDES))) {
2759 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-change bit (keeping SD_STATUS_UPDATED set) before
 * acting on it, so a new event is not lost.
 */
2760 sblk->status = SD_STATUS_UPDATED |
2761 (sblk->status & ~SD_STATUS_LINK_CHG);
2762 tg3_setup_phy(tp, 0);
2766 /* run TX completion thread */
2767 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2768 spin_lock(&tp->tx_lock);
2770 spin_unlock(&tp->tx_lock);
2773 spin_unlock_irqrestore(&tp->lock, flags);
2775 /* run RX thread, within the bounds set by NAPI.
2776 * All RX "locking" is done by ensuring outside
2777 * code synchronizes with dev->poll()
/* RX runs lockless here -- NAPI guarantees a single poller. */
2780 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2781 int orig_budget = *budget;
2784 if (orig_budget > netdev->quota)
2785 orig_budget = netdev->quota;
2787 work_done = tg3_rx(tp, orig_budget);
2789 *budget -= work_done;
2790 netdev->quota -= work_done;
/* Budget consumed entirely: more work may remain. */
2792 if (work_done >= orig_budget)
2796 /* if no more work, tell net stack and NIC we're done */
2798 spin_lock_irqsave(&tp->lock, flags);
2799 __netif_rx_complete(netdev);
2800 tg3_enable_ints(tp);
2801 spin_unlock_irqrestore(&tp->lock, flags);
2804 return (done ? 0 : 1);
/* Cheap test, used from the interrupt handler, of whether the status
 * block advertises any work (phy event, TX completion, or RX packet)
 * that would justify scheduling a NAPI poll.  Returns non-zero if so.
 */
2807 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2809 struct tg3_hw_status *sblk = tp->hw_status;
2810 unsigned int work_exists = 0;
2812 /* check for phy events */
2813 if (!(tp->tg3_flags &
2814 (TG3_FLAG_USE_LINKCHG_REG |
2815 TG3_FLAG_POLL_SERDES))) {
2816 if (sblk->status & SD_STATUS_LINK_CHG)
2819 /* check for RX/TX work to do */
2820 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2821 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
/* Hardware interrupt handler.  If the status block says this device
 * raised the interrupt, ACK/mask the chip via the interrupt mailbox and
 * hand the real work to NAPI; otherwise assume a shared-IRQ neighbor.
 */
2827 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2829 struct net_device *dev = dev_id;
2830 struct tg3 *tp = netdev_priv(dev);
2831 struct tg3_hw_status *sblk = tp->hw_status;
2832 unsigned long flags;
2833 unsigned int handled = 1;
2835 spin_lock_irqsave(&tp->lock, flags);
2837 if (sblk->status & SD_STATUS_UPDATED) {
2839 * writing any value to intr-mbox-0 clears PCI INTA# and
2840 * chip-internal interrupt pending events.
2841 * writing non-zero to intr-mbox-0 additional tells the
2842 * NIC to stop sending us irqs, engaging "in-intr-handler"
2845 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2848 * Flush PCI write. This also guarantees that our
2849 * status block has been flushed to host memory.
2851 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2852 sblk->status &= ~SD_STATUS_UPDATED;
2854 if (likely(tg3_has_work(dev, tp)))
2855 netif_rx_schedule(dev); /* schedule NAPI poll */
2857 /* no work, shared interrupt perhaps? re-enable
2858 * interrupts, and flush that PCI write
2860 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2862 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2864 } else { /* shared interrupt */
2868 spin_unlock_irqrestore(&tp->lock, flags);
2870 return IRQ_RETVAL(handled);
2873 static int tg3_init_hw(struct tg3 *);
2874 static int tg3_halt(struct tg3 *);
2876 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler directly so netconsole /
 * kgdb-over-ethernet can make progress with interrupts disabled.
 */
2877 static void tg3_poll_controller(struct net_device *dev)
2879 tg3_interrupt(dev->irq, dev, NULL);
/* Workqueue handler (scheduled from tg3_tx_timeout): fully reset and
 * reinitialize the chip under tp->lock/tp->tx_lock, then restart the
 * queue and, if the restart-timer flag was pending, re-arm tp->timer.
 */
2883 static void tg3_reset_task(void *_data)
2885 struct tg3 *tp = _data;
2886 unsigned int restart_timer;
2890 spin_lock_irq(&tp->lock);
2891 spin_lock(&tp->tx_lock);
/* Snapshot and clear the flag while holding the locks. */
2893 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2894 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2899 tg3_netif_start(tp);
2901 spin_unlock(&tp->tx_lock);
2902 spin_unlock_irq(&tp->lock);
2905 mod_timer(&tp->timer, jiffies + 1);
/* dev->tx_timeout hook: the watchdog declared the TX path stuck
 * (see TG3_TX_TIMEOUT).  Log it and defer the heavy chip reset to
 * process context via the reset_task workqueue item.
 */
2908 static void tg3_tx_timeout(struct net_device *dev)
2910 struct tg3 *tp = netdev_priv(dev);
2912 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2915 schedule_work(&tp->reset_task);
2918 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
/* Workaround for the hardware bug where a DMA buffer crossing a 4GB
 * boundary is mishandled (see tg3_4g_overflow_test): linearize the skb
 * into a freshly-mapped copy occupying a single descriptor, then unmap
 * and clear the original sw ring entries from *start to last_plus_one.
 * NOTE(review): error-path lines are elided in this extract.
 */
2920 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2921 u32 guilty_entry, int guilty_len,
2922 u32 last_plus_one, u32 *start, u32 mss)
2924 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2925 dma_addr_t new_addr;
2934 /* New SKB is guaranteed to be linear. */
2936 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2938 tg3_set_txd(tp, entry, new_addr, new_skb->len,
2939 (skb->ip_summed == CHECKSUM_HW) ?
2940 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2941 *start = NEXT_TX(entry);
2943 /* Now clean up the sw ring entries. */
2945 while (entry != last_plus_one) {
/* Entry 0 of the run is the linear head; the rest are page frags. */
2949 len = skb_headlen(skb);
2951 len = skb_shinfo(skb)->frags[i-1].size;
2952 pci_unmap_single(tp->pdev,
2953 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2954 len, PCI_DMA_TODEVICE);
/* First slot keeps the replacement skb; the rest are emptied. */
2956 tp->tx_buffers[entry].skb = new_skb;
2957 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2959 tp->tx_buffers[entry].skb = NULL;
2961 entry = NEXT_TX(entry);
/* Fill one TX descriptor.  mss_and_is_end packs the "last fragment"
 * flag in bit 0 and the TSO MSS in the remaining bits; the VLAN tag,
 * when present, rides in the upper 16 bits of 'flags'.
 */
2969 static void tg3_set_txd(struct tg3 *tp, int entry,
2970 dma_addr_t mapping, int len, u32 flags,
2973 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2974 int is_end = (mss_and_is_end & 0x1);
2975 u32 mss = (mss_and_is_end >> 1);
2979 flags |= TXD_FLAG_END;
2980 if (flags & TXD_FLAG_VLAN) {
2981 vlan_tag = flags >> 16;
2984 vlan_tag |= (mss << TXD_MSS_SHIFT);
/* Split the 64-bit DMA address across the two descriptor words. */
2986 txd->addr_hi = ((u64) mapping >> 32);
2987 txd->addr_lo = ((u64) mapping & 0xffffffff);
2988 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2989 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
/* Detect the hardware-bug condition: a TX buffer whose DMA span would
 * wrap across a 4GB address boundary (base + len + 8 overflows 32 bits).
 * Such buffers must go through tigon3_4gb_hwbug_workaround().
 */
2992 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2994 u32 base = (u32) mapping & 0xffffffff;
2996 return ((base > 0xffffdcc0) &&
2997 (base + len + 8 < base));
/* hard_start_xmit: map the skb (head + page frags) into TX descriptors,
 * applying checksum offload, TSO and VLAN flags, handle the 4GB-boundary
 * hardware bug, then kick the TX producer mailbox.  Uses a trylock on
 * tp->tx_lock (LLTX-style) and returns NETDEV_TX_LOCKED on contention.
 * NOTE(review): several original lines (labels, else branches, closing
 * braces) are elided in this extract.
 */
3000 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3002 struct tg3 *tp = netdev_priv(dev);
3005 u32 len, entry, base_flags, mss;
3006 int would_hit_hwbug;
3007 unsigned long flags;
3009 len = skb_headlen(skb);
3011 /* No BH disabling for tx_lock here. We are running in BH disabled
3012 * context and TX reclaim runs via tp->poll inside of a software
3013 * interrupt. Rejoice!
3015 * Actually, things are not so simple. If we are to take a hw
3016 * IRQ here, we can deadlock, consider:
3025 * spin on tp->tx_lock
3027 * So we really do need to disable interrupts when taking
3030 local_irq_save(flags);
3031 if (!spin_trylock(&tp->tx_lock)) {
3032 local_irq_restore(flags);
3033 return NETDEV_TX_LOCKED;
3036 /* This is a hard error, log it. */
/* Queue should have been stopped before the ring filled; reaching this
 * with too few free descriptors is a driver bug.
 */
3037 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3038 netif_stop_queue(dev);
3039 spin_unlock_irqrestore(&tp->tx_lock, flags);
3040 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3042 return NETDEV_TX_BUSY;
3045 entry = tp->tx_prod;
3047 if (skb->ip_summed == CHECKSUM_HW)
3048 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3049 #if TG3_TSO_SUPPORT != 0
/* TSO path: pre-compute the pseudo-header checksum and encode IP/TCP
 * option lengths for the hardware segmentation engine.
 */
3051 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3052 (mss = skb_shinfo(skb)->tso_size) != 0) {
3053 int tcp_opt_len, ip_tcp_len;
3055 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3056 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3058 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3059 TXD_FLAG_CPU_POST_DMA);
3061 skb->nh.iph->check = 0;
3062 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3063 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* 5705 encodes header-option lengths in the mss field; other chips
 * put them into base_flags instead.
 */
3067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3068 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3071 tsflags = ((skb->nh.iph->ihl - 5) +
3072 (tcp_opt_len >> 2));
3073 mss |= (tsflags << 11);
3076 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3079 tsflags = ((skb->nh.iph->ihl - 5) +
3080 (tcp_opt_len >> 2));
3081 base_flags |= tsflags << 12;
3088 #if TG3_VLAN_TAG_USED
3089 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3090 base_flags |= (TXD_FLAG_VLAN |
3091 (vlan_tx_tag_get(skb) << 16));
3094 /* Queue skb data, a.k.a. the main skb fragment. */
3095 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3097 tp->tx_buffers[entry].skb = skb;
3098 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3100 would_hit_hwbug = 0;
/* Remember entry+1 (so 0 stays "no bug") of any fragment that would
 * cross a 4GB boundary; fixed up below.
 */
3102 if (tg3_4g_overflow_test(mapping, len))
3103 would_hit_hwbug = entry + 1;
3105 tg3_set_txd(tp, entry, mapping, len, base_flags,
3106 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3108 entry = NEXT_TX(entry);
3110 /* Now loop through additional data fragments, and queue them. */
3111 if (skb_shinfo(skb)->nr_frags > 0) {
3112 unsigned int i, last;
3114 last = skb_shinfo(skb)->nr_frags - 1;
3115 for (i = 0; i <= last; i++) {
3116 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3119 mapping = pci_map_page(tp->pdev,
3122 len, PCI_DMA_TODEVICE);
3124 tp->tx_buffers[entry].skb = NULL;
3125 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3127 if (tg3_4g_overflow_test(mapping, len)) {
3128 /* Only one should match. */
3129 if (would_hit_hwbug)
3131 would_hit_hwbug = entry + 1;
3134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3135 tg3_set_txd(tp, entry, mapping, len,
3136 base_flags, (i == last)|(mss << 1));
3138 tg3_set_txd(tp, entry, mapping, len,
3139 base_flags, (i == last));
3141 entry = NEXT_TX(entry);
/* Rewind over the descriptors we just wrote, find the guilty one, and
 * let tigon3_4gb_hwbug_workaround() replace the whole run with a
 * linearized copy.
 */
3145 if (would_hit_hwbug) {
3146 u32 last_plus_one = entry;
3148 unsigned int len = 0;
3150 would_hit_hwbug -= 1;
3151 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3152 entry &= (TG3_TX_RING_SIZE - 1);
3155 while (entry != last_plus_one) {
3157 len = skb_headlen(skb);
3159 len = skb_shinfo(skb)->frags[i-1].size;
3161 if (entry == would_hit_hwbug)
3165 entry = NEXT_TX(entry);
3169 /* If the workaround fails due to memory/mapping
3170 * failure, silently drop this packet.
3172 if (tigon3_4gb_hwbug_workaround(tp, skb,
3181 /* Packets are ready, update Tx producer idx local and on card. */
3182 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3184 tp->tx_prod = entry;
/* Stop the queue while we may not have room for a max-frag skb. */
3185 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3186 netif_stop_queue(dev);
3189 spin_unlock_irqrestore(&tp->tx_lock, flags);
3191 dev->trans_start = jiffies;
3193 return NETDEV_TX_OK;
/* Record the new MTU and flip the jumbo-frames flag accordingly;
 * the caller is responsible for any needed chip reinitialization.
 */
3196 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3201 if (new_mtu > ETH_DATA_LEN)
3202 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3204 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
/* dev->change_mtu hook: validate the requested MTU against the chip's
 * limits (TG3_MIN_MTU..TG3_MAX_MTU), then either just record it (device
 * down) or record it and reinitialize the hardware under the locks.
 */
3207 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3209 struct tg3 *tp = netdev_priv(dev);
3211 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3214 if (!netif_running(dev)) {
3215 /* We'll just catch it later when the
3218 tg3_set_mtu(dev, tp, new_mtu);
3223 spin_lock_irq(&tp->lock);
3224 spin_lock(&tp->tx_lock);
3228 tg3_set_mtu(dev, tp, new_mtu);
3232 tg3_netif_start(tp);
3234 spin_unlock(&tp->tx_lock);
3235 spin_unlock_irq(&tp->lock);
3240 /* Free up pending packets in all rx/tx rings.
3242 * The chip has been shut down and the driver detached from
3243 * the networking, so no interrupts or new tx packets will
3244 * end up in the driver. tp->{tx,}lock is not held and we are not
3245 * in an interrupt context and thus may sleep.
/* See the block comment above: releases every pending skb (and its DMA
 * mapping) in the std RX, jumbo RX and TX rings.  May sleep; must not
 * race with the chip or the fast paths.
 */
3247 static void tg3_free_rings(struct tg3 *tp)
3249 struct ring_info *rxp;
3252 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3253 rxp = &tp->rx_std_buffers[i];
3255 if (rxp->skb == NULL)
3257 pci_unmap_single(tp->pdev,
3258 pci_unmap_addr(rxp, mapping),
3259 RX_PKT_BUF_SZ - tp->rx_offset,
3260 PCI_DMA_FROMDEVICE);
3261 dev_kfree_skb_any(rxp->skb);
3265 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3266 rxp = &tp->rx_jumbo_buffers[i];
3268 if (rxp->skb == NULL)
3270 pci_unmap_single(tp->pdev,
3271 pci_unmap_addr(rxp, mapping),
3272 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3273 PCI_DMA_FROMDEVICE);
3274 dev_kfree_skb_any(rxp->skb);
/* TX ring: each queued skb occupies one head slot plus one slot per
 * page fragment, so the index advances inside the loop body.
 */
3278 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3279 struct tx_ring_info *txp;
3280 struct sk_buff *skb;
3283 txp = &tp->tx_buffers[i];
3291 pci_unmap_single(tp->pdev,
3292 pci_unmap_addr(txp, mapping),
3299 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3300 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3301 pci_unmap_page(tp->pdev,
3302 pci_unmap_addr(txp, mapping),
3303 skb_shinfo(skb)->frags[j].size,
3308 dev_kfree_skb_any(skb);
3312 /* Initialize tx/rx rings for packet processing.
3314 * The chip has been shut down and the driver detached from
3315 * the networking, so no interrupts or new tx packets will
3316 * end up in the driver. tp->{tx,}lock are held and thus
/* See the block comment above: frees old skbs, zeroes all descriptor
 * rings, writes the invariant fields of the RX posting descriptors,
 * then allocates fresh RX skbs up to rx_pending / rx_jumbo_pending.
 */
3319 static void tg3_init_rings(struct tg3 *tp)
3323 /* Free up all the SKBs. */
3326 /* Zero out all descriptors. */
3327 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3328 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3329 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3330 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3332 /* Initialize invariants of the rings, we only set this
3333 * stuff once. This works because the card does not
3334 * write into the rx buffer posting rings.
3336 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3337 struct tg3_rx_buffer_desc *rxd;
3339 rxd = &tp->rx_std[i];
3340 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3342 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
/* The opaque cookie encodes ring identity + slot index; tg3_rx()
 * decodes it to find the buffer a completion refers to.
 */
3343 rxd->opaque = (RXD_OPAQUE_RING_STD |
3344 (i << RXD_OPAQUE_INDEX_SHIFT));
3347 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3348 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3349 struct tg3_rx_buffer_desc *rxd;
3351 rxd = &tp->rx_jumbo[i];
3352 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3354 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3356 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3357 (i << RXD_OPAQUE_INDEX_SHIFT));
3361 /* Now allocate fresh SKBs for each rx ring. */
3362 for (i = 0; i < tp->rx_pending; i++) {
3363 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3368 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3369 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3370 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3378 * Must not be invoked with interrupt sources disabled and
3379 * the hardware shutdown down.
/* Release every DMA-consistent allocation made by tg3_alloc_consistent()
 * (descriptor rings, status block, statistics block) plus the kmalloc'd
 * software ring-info arrays, NULLing the pointers as it goes.  Safe to
 * call on a partially-allocated state (each region is checked first).
 */
3381 static void tg3_free_consistent(struct tg3 *tp)
3383 if (tp->rx_std_buffers) {
/* Single kmalloc backs rx_std_buffers/rx_jumbo_buffers/tx_buffers. */
3384 kfree(tp->rx_std_buffers);
3385 tp->rx_std_buffers = NULL;
3388 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3389 tp->rx_std, tp->rx_std_mapping);
3393 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3394 tp->rx_jumbo, tp->rx_jumbo_mapping);
3395 tp->rx_jumbo = NULL;
3398 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3399 tp->rx_rcb, tp->rx_rcb_mapping);
3403 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3404 tp->tx_ring, tp->tx_desc_mapping);
3407 if (tp->hw_status) {
3408 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3409 tp->hw_status, tp->status_mapping);
3410 tp->hw_status = NULL;
3413 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3414 tp->hw_stats, tp->stats_mapping);
3415 tp->hw_stats = NULL;
3420 * Must not be invoked with interrupt sources disabled and
3421 * the hardware shutdown down. Can sleep.
/* Allocate all host memory the chip DMAs into/out of: one kmalloc for
 * the three software ring-info arrays, and pci_alloc_consistent regions
 * for the descriptor rings, status block and statistics block.  On any
 * failure, unwinds via tg3_free_consistent().  Can sleep.
 */
3423 static int tg3_alloc_consistent(struct tg3 *tp)
3425 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3427 TG3_RX_JUMBO_RING_SIZE)) +
3428 (sizeof(struct tx_ring_info) *
3431 if (!tp->rx_std_buffers)
3434 memset(tp->rx_std_buffers, 0,
3435 (sizeof(struct ring_info) *
3437 TG3_RX_JUMBO_RING_SIZE)) +
3438 (sizeof(struct tx_ring_info) *
/* Carve the jumbo and tx info arrays out of the single allocation. */
3441 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3442 tp->tx_buffers = (struct tx_ring_info *)
3443 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3445 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3446 &tp->rx_std_mapping);
3450 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3451 &tp->rx_jumbo_mapping);
3456 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3457 &tp->rx_rcb_mapping);
3461 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3462 &tp->tx_desc_mapping);
3466 tp->hw_status = pci_alloc_consistent(tp->pdev,
3468 &tp->status_mapping);
3472 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3473 sizeof(struct tg3_hw_stats),
3474 &tp->stats_mapping);
3478 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3479 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3484 tg3_free_consistent(tp);
3488 #define MAX_WAIT_CNT 1000
3490 /* To stop a block, clear the enable bit and poll till it
3491 * clears. tp->lock is held.
/* See comment above: clear 'enable_bit' in the mode register at 'ofs'
 * and poll up to MAX_WAIT_CNT times for the hardware to acknowledge.
 * On 5705/5750 several blocks cannot be toggled, so those are treated
 * as success.  Returns non-zero on timeout.  tp->lock is held.
 */
3493 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3499 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3506 /* We can't enable/disable these bits of the
3507 * 5705/5750, just say success.
3520 for (i = 0; i < MAX_WAIT_CNT; i++) {
3523 if ((val & enable_bit) == 0)
3527 if (i == MAX_WAIT_CNT) {
3528 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3529 "ofs=%lx enable_bit=%x\n",
3537 /* tp->lock is held. */
/* Quiesce the chip: disable interrupts and the RX/TX MAC engines, stop
 * every receive/send/DMA/host-coalescing block in dependency order,
 * reset the FTQs, and clear the status and statistics blocks.  Returns
 * the OR of the individual stop results.  tp->lock is held.
 */
3538 static int tg3_abort_hw(struct tg3 *tp)
3542 tg3_disable_ints(tp);
3544 tp->rx_mode &= ~RX_MODE_ENABLE;
3545 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop the receive-side blocks first so no new frames flow in. */
3548 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3549 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3550 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3551 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3552 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3553 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3555 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3556 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3557 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3558 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3559 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3560 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3561 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3565 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3566 tw32_f(MAC_MODE, tp->mac_mode);
3569 tp->tx_mode &= ~TX_MODE_ENABLE;
3570 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll for the TX MAC to actually drain and stop. */
3572 for (i = 0; i < MAX_WAIT_CNT; i++) {
3574 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3577 if (i >= MAX_WAIT_CNT) {
3578 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3579 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3580 tp->dev->name, tr32(MAC_TX_MODE));
3584 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3585 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3586 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
/* Pulse FTQ_RESET to flush the internal FIFO queues. */
3588 tw32(FTQ_RESET, 0xffffffff);
3589 tw32(FTQ_RESET, 0x00000000);
3591 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3592 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3597 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3599 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3605 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore by requesting grant
 * slot 1 and polling SWARB_GNT1.  Only needed when TG3_FLAG_NVRAM is
 * set.  tp->lock is held.
 * NOTE(review): the timeout/return path is elided in this extract.
 */
3606 static int tg3_nvram_lock(struct tg3 *tp)
3608 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3611 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3612 for (i = 0; i < 8000; i++) {
3613 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3623 /* tp->lock is held. */
/* Drop the NVRAM arbitration grant taken by tg3_nvram_lock().
 * tp->lock is held.
 */
3624 static void tg3_nvram_unlock(struct tg3 *tp)
3626 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3627 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3630 /* tp->lock is held. */
/* Before resetting the chip: write the driver-alive magic to the
 * firmware mailbox, and (for ASF new-handshake firmware) announce the
 * upcoming state transition per 'kind'.  tp->lock is held.
 */
3631 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3633 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3634 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3636 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3638 case RESET_KIND_INIT:
3639 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3643 case RESET_KIND_SHUTDOWN:
3644 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3648 case RESET_KIND_SUSPEND:
3649 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3659 /* tp->lock is held. */
/* After a chip reset completes: tell new-handshake ASF firmware that
 * the start/unload transition announced pre-reset has finished.
 * tp->lock is held.
 */
3660 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3662 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3664 case RESET_KIND_INIT:
3665 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3666 DRV_STATE_START_DONE);
3669 case RESET_KIND_SHUTDOWN:
3670 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3671 DRV_STATE_UNLOAD_DONE);
3680 /* tp->lock is held. */
/* Legacy ASF signalling: write the driver-state mailbox for firmware
 * that predates the new handshake, keyed on TG3_FLAG_ENABLE_ASF.
 * tp->lock is held.
 */
3681 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3683 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3685 case RESET_KIND_INIT:
3686 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3690 case RESET_KIND_SHUTDOWN:
3691 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3695 case RESET_KIND_SUSPEND:
3696 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3706 static void tg3_stop_fw(struct tg3 *);
3708 /* tp->lock is held. */
/* Perform a full GRC core-clock reset of the chip, then bring the PCI
 * configuration, indirect register access, clocking, MAC mode and ASF
 * state back to a sane baseline and wait for on-chip firmware to
 * restart.  Contains numerous per-ASIC-revision workarounds.
 * tp->lock is held.
 * NOTE(review): some original lines (declarations, braces, udelays)
 * are elided in this extract.
 */
3709 static int tg3_chip_reset(struct tg3 *tp)
3715 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3719 * We must avoid the readl() that normally takes place.
3720 * It locks machines, causes machine checks, and other
3721 * fun things. So, temporarily disable the 5701
3722 * hardware workaround, while we do the reset.
3724 flags_save = tp->tg3_flags;
3725 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3728 val = GRC_MISC_CFG_CORECLK_RESET;
3730 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3731 if (tr32(0x7e2c) == 0x60) {
3734 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3735 tw32(GRC_MISC_CFG, (1 << 29));
/* Keep the GPHY powered across the reset on 5705/5750. */
3740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3742 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3743 tw32(GRC_MISC_CFG, val);
3745 /* restore 5701 hardware bug workaround flag */
3746 tp->tg3_flags = flags_save;
3748 /* Unfortunately, we have to delay before the PCI read back.
3749 * Some 575X chips even will not respond to a PCI cfg access
3750 * when the reset command is given to the chip.
3752 * How do these hardware designers expect things to work
3753 * properly if the PCI write is posted for a long period
3754 * of time? It is always necessary to have some method by
3755 * which a register read back can occur to push the write
3756 * out which does the reset.
3758 * For most tg3 variants the trick below was working.
3763 /* Flush PCI posted writes. The normal MMIO registers
3764 * are inaccessible at this time so this is the only
3765 * way to make this reliably (actually, this is no longer
3766 * the case, see above). I tried to use indirect
3767 * register read/write but this upset some 5701 variants.
3769 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3773 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3774 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3778 /* Wait for link training to complete. */
3779 for (i = 0; i < 5000; i++)
3782 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3783 pci_write_config_dword(tp->pdev, 0xc4,
3784 cfg_val | (1 << 15));
3786 /* Set PCIE max payload size and clear error status. */
3787 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3790 /* Re-enable indirect register accesses. */
3791 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3792 tp->misc_host_ctrl);
3794 /* Set MAX PCI retry to zero. */
3795 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3796 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3797 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3798 val |= PCISTATE_RETRY_SAME_DMA;
3799 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3801 pci_restore_state(tp->pdev, tp->pci_cfg_state);
3803 /* Make sure PCI-X relaxed ordering bit is clear. */
3804 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3805 val &= ~PCIX_CAPS_RELAXED_ORDERING;
3806 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3808 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3810 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3812 tw32(0x5000, 0x400);
3815 tw32(GRC_MODE, tp->grc_mode);
3817 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3818 u32 val = tr32(0xc4);
3820 tw32(0xc4, val | (1 << 15));
3823 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3825 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3826 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3827 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3828 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3831 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3832 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3833 tw32_f(MAC_MODE, tp->mac_mode);
3835 tw32_f(MAC_MODE, 0);
3838 /* Wait for firmware initialization to complete. */
3839 for (i = 0; i < 100000; i++) {
3840 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3841 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Sun 570X boards have no on-board firmware; do not warn there. */
3846 !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3847 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3848 "firmware will not restart magic=%08x\n",
3849 tp->dev->name, val);
3853 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3854 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3855 u32 val = tr32(0x7c00);
3857 tw32(0x7c00, val | (1 << 25));
3860 /* Reprobe ASF enable state. */
3861 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3862 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3863 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3864 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3867 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3868 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3869 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3870 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3871 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3878 /* tp->lock is held. */
/* Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW, raise the
 * RX CPU event, and poll briefly for the CPU to acknowledge.  Only acts
 * when ASF is enabled.  tp->lock is held.
 */
3879 static void tg3_stop_fw(struct tg3 *tp)
3881 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3885 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3886 val = tr32(GRC_RX_CPU_EVENT);
3888 tw32(GRC_RX_CPU_EVENT, val);
3890 /* Wait for RX cpu to ACK the event. */
3891 for (i = 0; i < 100; i++) {
3892 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3899 /* tp->lock is held. */
/* Orderly shutdown of the chip: signal firmware pre-reset, reset the
 * chip, then signal legacy and post-reset shutdown state.  Returns the
 * tg3_chip_reset() result.  tp->lock is held.
 */
3900 static int tg3_halt(struct tg3 *tp)
3906 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3909 err = tg3_chip_reset(tp);
3911 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3912 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3920 #define TG3_FW_RELEASE_MAJOR 0x0
3921 #define TG3_FW_RELASE_MINOR 0x0
3922 #define TG3_FW_RELEASE_FIX 0x0
3923 #define TG3_FW_START_ADDR 0x08000000
3924 #define TG3_FW_TEXT_ADDR 0x08000000
3925 #define TG3_FW_TEXT_LEN 0x9c0
3926 #define TG3_FW_RODATA_ADDR 0x080009c0
3927 #define TG3_FW_RODATA_LEN 0x60
3928 #define TG3_FW_DATA_ADDR 0x08000a40
3929 #define TG3_FW_DATA_LEN 0x20
3930 #define TG3_FW_SBSS_ADDR 0x08000a60
3931 #define TG3_FW_SBSS_LEN 0xc
3932 #define TG3_FW_BSS_ADDR 0x08000a70
3933 #define TG3_FW_BSS_LEN 0x10
3935 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3936 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3937 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3938 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3939 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3940 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3941 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3942 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3943 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3944 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3945 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3946 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3947 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3948 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3949 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3950 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3951 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3952 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3953 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3954 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3955 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3956 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3957 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3958 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3959 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3960 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3962 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3963 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3964 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3965 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3966 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3967 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3968 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3969 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3970 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3971 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3972 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3973 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3974 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3975 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3976 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3977 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3978 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3979 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3980 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3981 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3982 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3983 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3984 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3985 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3986 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3987 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3988 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3989 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3990 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3991 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3992 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3993 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3994 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3995 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3996 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3997 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3998 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3999 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4000 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4001 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4002 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4003 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4004 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4005 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4006 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4007 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4008 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4009 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4010 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4011 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4012 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4013 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4014 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4015 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4016 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4017 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4018 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4019 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4020 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4021 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4022 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4023 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4024 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4025 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4026 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4029 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4030 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4031 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4032 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4033 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4037 #if 0 /* All zeros, don't eat up space with it. */
4038 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4039 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4040 0x00000000, 0x00000000, 0x00000000, 0x00000000
4044 #define RX_CPU_SCRATCH_BASE 0x30000
4045 #define RX_CPU_SCRATCH_SIZE 0x04000
4046 #define TX_CPU_SCRATCH_BASE 0x34000
4047 #define TX_CPU_SCRATCH_SIZE 0x04000
4049 /* tp->lock is held. */
4050 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4054 if (offset == TX_CPU_BASE &&
4055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4058 if (offset == RX_CPU_BASE) {
4059 for (i = 0; i < 10000; i++) {
4060 tw32(offset + CPU_STATE, 0xffffffff);
4061 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4062 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4066 tw32(offset + CPU_STATE, 0xffffffff);
4067 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4070 for (i = 0; i < 10000; i++) {
4071 tw32(offset + CPU_STATE, 0xffffffff);
4072 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4073 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4079 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4082 (offset == RX_CPU_BASE ? "RX" : "TX"));
4089 unsigned int text_base;
4090 unsigned int text_len;
4092 unsigned int rodata_base;
4093 unsigned int rodata_len;
4095 unsigned int data_base;
4096 unsigned int data_len;
4100 /* tp->lock is held. */
4101 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4102 int cpu_scratch_size, struct fw_info *info)
4105 u32 orig_tg3_flags = tp->tg3_flags;
4106 void (*write_op)(struct tg3 *, u32, u32);
4108 if (cpu_base == TX_CPU_BASE &&
4109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4110 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4111 "TX cpu firmware on %s which is 5705.\n",
4116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4117 write_op = tg3_write_mem;
4119 write_op = tg3_write_indirect_reg32;
4121 /* Force use of PCI config space for indirect register
4124 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4126 err = tg3_halt_cpu(tp, cpu_base);
4130 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4131 write_op(tp, cpu_scratch_base + i, 0);
4132 tw32(cpu_base + CPU_STATE, 0xffffffff);
4133 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4134 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4135 write_op(tp, (cpu_scratch_base +
4136 (info->text_base & 0xffff) +
4139 info->text_data[i] : 0));
4140 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4141 write_op(tp, (cpu_scratch_base +
4142 (info->rodata_base & 0xffff) +
4144 (info->rodata_data ?
4145 info->rodata_data[i] : 0));
4146 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4147 write_op(tp, (cpu_scratch_base +
4148 (info->data_base & 0xffff) +
4151 info->data_data[i] : 0));
4156 tp->tg3_flags = orig_tg3_flags;
4160 /* tp->lock is held. */
4161 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4163 struct fw_info info;
4166 info.text_base = TG3_FW_TEXT_ADDR;
4167 info.text_len = TG3_FW_TEXT_LEN;
4168 info.text_data = &tg3FwText[0];
4169 info.rodata_base = TG3_FW_RODATA_ADDR;
4170 info.rodata_len = TG3_FW_RODATA_LEN;
4171 info.rodata_data = &tg3FwRodata[0];
4172 info.data_base = TG3_FW_DATA_ADDR;
4173 info.data_len = TG3_FW_DATA_LEN;
4174 info.data_data = NULL;
4176 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4177 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4182 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4183 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4188 /* Now startup only the RX cpu. */
4189 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4190 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4192 for (i = 0; i < 5; i++) {
4193 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4195 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4196 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4197 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4201 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4202 "to set RX CPU PC, is %08x should be %08x\n",
4203 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4207 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4208 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4213 #if TG3_TSO_SUPPORT != 0
4215 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4216 #define TG3_TSO_FW_RELASE_MINOR 0x6
4217 #define TG3_TSO_FW_RELEASE_FIX 0x0
4218 #define TG3_TSO_FW_START_ADDR 0x08000000
4219 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4220 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4221 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4222 #define TG3_TSO_FW_RODATA_LEN 0x60
4223 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4224 #define TG3_TSO_FW_DATA_LEN 0x30
4225 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4226 #define TG3_TSO_FW_SBSS_LEN 0x2c
4227 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4228 #define TG3_TSO_FW_BSS_LEN 0x894
4230 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4231 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4232 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4233 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4234 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4235 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4236 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4237 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4238 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4239 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4240 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4241 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4242 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4243 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4244 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4245 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4246 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4247 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4248 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4249 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4250 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4251 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4252 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4253 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4254 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4255 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4256 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4257 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4258 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4259 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4260 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4261 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4262 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4263 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4264 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4265 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4266 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4267 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4268 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4269 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4270 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4271 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4272 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4273 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4274 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4275 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4276 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4277 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4278 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4279 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4280 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4281 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4282 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4283 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4284 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4285 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4286 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4287 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4288 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4289 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4290 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4291 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4292 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4293 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4294 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4295 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4296 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4297 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4298 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4299 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4300 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4301 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4302 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4303 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4304 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4305 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4306 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4307 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4308 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4309 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4310 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4311 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4312 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4313 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4314 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4315 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4316 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4317 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4318 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4319 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4320 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4321 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4322 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4323 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4324 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4325 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4326 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4327 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4328 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4329 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4330 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4331 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4332 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4333 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4334 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4335 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4336 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4337 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4338 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4339 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4340 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4341 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4342 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4343 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4344 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4345 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4346 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4347 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4348 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4349 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4350 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4351 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4352 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4353 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4354 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4355 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4356 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4357 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4358 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4359 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4360 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4361 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4362 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4363 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4364 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4365 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4366 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4367 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4368 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4369 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4370 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4371 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4372 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4373 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4374 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4375 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4376 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4377 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4378 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4379 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4380 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4381 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4382 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4383 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4384 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4385 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4386 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4387 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4388 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4389 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4390 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4391 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4392 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4393 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4394 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4395 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4396 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4397 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4398 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4399 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4400 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4401 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4402 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4403 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4404 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4405 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4406 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4407 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4408 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4409 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4410 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4411 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4412 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4413 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4414 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4415 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4416 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4417 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4418 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4419 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4420 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4421 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4422 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4423 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4424 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4425 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4426 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4427 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4428 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4429 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4430 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4431 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4432 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4433 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4434 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4435 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4436 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4437 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4438 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4439 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4440 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4441 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4442 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4443 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4444 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4445 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4446 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4447 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4448 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4449 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4450 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4451 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4452 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4453 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4454 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4455 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4456 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4457 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4458 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4459 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4460 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4461 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4462 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4463 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4464 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4465 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4466 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4467 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4468 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4469 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4470 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4471 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4472 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4473 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4474 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4475 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4476 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4477 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4478 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4479 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4480 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4481 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4482 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4483 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4484 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4485 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4486 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4487 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4488 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4489 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4490 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4491 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4492 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4493 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4494 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4495 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4496 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4497 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4498 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4499 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4500 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4501 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4502 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4503 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4504 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4505 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4506 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4507 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4508 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4509 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4510 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4511 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4512 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4513 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4514 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4517 u32 tg3TsoFwRodata[] = {
4518 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4519 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4520 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4521 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4525 u32 tg3TsoFwData[] = {
4526 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4527 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4531 /* 5705 needs a special version of the TSO firmware. */
/*
 * Version stamp of the 5705-specific TSO firmware image (v1.2.0).
 * NOTE(review): "RELASE" in the MINOR macro below is a typo for
 * "RELEASE"; it is left as-is because code elsewhere in the file may
 * reference the misspelled name -- confirm before renaming.
 */
4532 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
4533 #define TG3_TSO5_FW_RELASE_MINOR 0x2
4534 #define TG3_TSO5_FW_RELEASE_FIX 0x0
/*
 * Link map of the TSO5 image inside the NIC's address space: base
 * address and byte length of each segment (text, rodata, data, sbss,
 * bss).  These must stay in sync with the tg3Tso5Fw* arrays below;
 * tg3_load_tso_firmware() and the 5705 mbuf-pool carve-out in
 * tg3_reset_hw() both depend on them.
 */
4535 #define TG3_TSO5_FW_START_ADDR 0x00010000
4536 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
4537 #define TG3_TSO5_FW_TEXT_LEN 0xe90
4538 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
4539 #define TG3_TSO5_FW_RODATA_LEN 0x50
4540 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
4541 #define TG3_TSO5_FW_DATA_LEN 0x20
4542 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
4543 #define TG3_TSO5_FW_SBSS_LEN 0x28
4544 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
4545 #define TG3_TSO5_FW_BSS_LEN 0x88
/*
 * Text (code) segment of the 5705-specific TSO firmware, stored as an
 * opaque array of 32-bit words (appears to be MIPS machine code for the
 * NIC's internal CPU -- confirm against Broadcom firmware sources).
 * Sized from TG3_TSO5_FW_TEXT_LEN and loaded at TG3_TSO5_FW_TEXT_ADDR
 * by tg3_load_tso_firmware().  Do not edit by hand.
 */
4547 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4548 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4549 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4550 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4551 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4552 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4553 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4554 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4555 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4556 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4557 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4558 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4559 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4560 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4561 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4562 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4563 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4564 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4565 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4566 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4567 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4568 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4569 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4570 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4571 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4572 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4573 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4574 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4575 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4576 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4577 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4578 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4579 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4580 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4581 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4582 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4583 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4584 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4585 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4586 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4587 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4588 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4589 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4590 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4591 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4592 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4593 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4594 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4595 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4596 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4597 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4598 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4599 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4600 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4601 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4602 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4603 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4604 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4605 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4606 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4607 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4608 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4609 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4610 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4611 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4612 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4613 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4614 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4615 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4616 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4617 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4618 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4619 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4620 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4621 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4622 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4623 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4624 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4625 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4626 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4627 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4628 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4629 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4630 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4631 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4632 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4633 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4634 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4635 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4636 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4637 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4638 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4639 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4640 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4641 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4642 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4643 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4644 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4645 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4646 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4647 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4648 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4649 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4650 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4651 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4652 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4653 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4654 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4655 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4656 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4657 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4658 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4659 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4660 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4661 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4662 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4663 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4664 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4665 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4666 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4667 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4668 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4669 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4670 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4671 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4672 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4673 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4674 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4675 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4676 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4677 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4678 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4679 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4680 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4681 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4682 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4683 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4684 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4685 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4686 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4687 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4688 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4689 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4690 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4691 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4692 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4693 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4694 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4695 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4696 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4697 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4698 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4699 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4700 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4701 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4702 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4703 0x00000000, 0x00000000, 0x00000000,
/*
 * Read-only data segment of the 5705-specific TSO firmware (opaque
 * blob; the ASCII-looking words are embedded strings).  Sized from
 * TG3_TSO5_FW_RODATA_LEN and loaded at TG3_TSO5_FW_RODATA_ADDR by
 * tg3_load_tso_firmware().
 */
4706 u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4707 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4708 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4709 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4710 0x00000000, 0x00000000, 0x00000000,
/*
 * Initialized data segment of the 5705-specific TSO firmware (opaque
 * blob; contains a version string).  Sized from TG3_TSO5_FW_DATA_LEN
 * and loaded at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
4713 u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4714 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4715 0x00000000, 0x00000000, 0x00000000,
4718 /* tp->lock is held. */
/*
 * Load the appropriate TSO firmware image into one of the NIC's
 * internal CPUs and start it executing at the image's text base.
 * 5705 gets the special TSO5 image on the RX CPU, with scratch space
 * carved out of the on-chip mbuf pool; other chips get the standard
 * image on the TX CPU's dedicated scratch area.
 */
4719 static int tg3_load_tso_firmware(struct tg3 *tp)
4721 struct fw_info info;
4722 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
/* NOTE(review): 5750 is special-cased here -- presumably it does not
 * take this firmware image; confirm against the 5750 handling in
 * tg3_reset_hw(). */
4725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
/* 5705: describe the three TSO5 segments and run on the RX CPU. */
4728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4729 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4730 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4731 info.text_data = &tg3Tso5FwText[0];
4732 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4733 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4734 info.rodata_data = &tg3Tso5FwRodata[0];
4735 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4736 info.data_len = TG3_TSO5_FW_DATA_LEN;
4737 info.data_data = &tg3Tso5FwData[0];
4738 cpu_base = RX_CPU_BASE;
/* Scratch area overlays the start of the 5705 mbuf pool; size covers
 * every firmware segment (text + ... + sbss + bss). */
4739 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4740 cpu_scratch_size = (info.text_len +
4743 TG3_TSO5_FW_SBSS_LEN +
4744 TG3_TSO5_FW_BSS_LEN);
/* All other TSO-capable chips: standard image on the TX CPU. */
4746 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4747 info.text_len = TG3_TSO_FW_TEXT_LEN;
4748 info.text_data = &tg3TsoFwText[0];
4749 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4750 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4751 info.rodata_data = &tg3TsoFwRodata[0];
4752 info.data_base = TG3_TSO_FW_DATA_ADDR;
4753 info.data_len = TG3_TSO_FW_DATA_LEN;
4754 info.data_data = &tg3TsoFwData[0];
4755 cpu_base = TX_CPU_BASE;
4756 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4757 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
/* Copy the image into the chosen CPU's scratch memory. */
4760 err = tg3_load_firmware_cpu(tp, cpu_base,
4761 cpu_scratch_base, cpu_scratch_size,
4766 /* Now startup the cpu. */
4767 tw32(cpu_base + CPU_STATE, 0xffffffff);
4768 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Verify the program counter latched; re-halt and retry a few times. */
4770 for (i = 0; i < 5; i++) {
4771 if (tr32(cpu_base + CPU_PC) == info.text_base)
4773 tw32(cpu_base + CPU_STATE, 0xffffffff);
4774 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
4775 tw32_f(cpu_base + CPU_PC, info.text_base);
4779 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
4780 "to set CPU PC, is %08x should be %08x\n",
4781 tp->dev->name, tr32(cpu_base + CPU_PC),
/* PC verified: clear CPU_MODE (drop the halt bit) to let it run. */
4785 tw32(cpu_base + CPU_STATE, 0xffffffff);
4786 tw32_f(cpu_base + CPU_MODE, 0x00000000);
4790 #endif /* TG3_TSO_SUPPORT != 0 */
4792 /* tp->lock is held. */
/*
 * Program the current dev->dev_addr into the MAC's address registers
 * and derive the TX backoff seed from it.  Callers hold tp->lock.
 */
4793 static void __tg3_set_mac_addr(struct tg3 *tp)
4795 u32 addr_high, addr_low;
/* Split the 6-byte MAC: bytes 0-1 into the high register, 2-5 low. */
4798 addr_high = ((tp->dev->dev_addr[0] << 8) |
4799 tp->dev->dev_addr[1]);
4800 addr_low = ((tp->dev->dev_addr[2] << 24) |
4801 (tp->dev->dev_addr[3] << 16) |
4802 (tp->dev->dev_addr[4] << 8) |
4803 (tp->dev->dev_addr[5] << 0));
/* The chip has four MAC address slots; fill them all with the same
 * address (each slot is an 8-byte-spaced high/low register pair). */
4804 for (i = 0; i < 4; i++) {
4805 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4806 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* Chips other than 5700/5701/5705 also have 12 extended address
 * slots; mirror the address into those as well. */
4809 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4810 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4811 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4812 for (i = 0; i < 12; i++) {
4813 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4814 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the transmit backoff algorithm from the byte-sum of the MAC
 * address (masked to the register width). */
4818 addr_high = (tp->dev->dev_addr[0] +
4819 tp->dev->dev_addr[1] +
4820 tp->dev->dev_addr[2] +
4821 tp->dev->dev_addr[3] +
4822 tp->dev->dev_addr[4] +
4823 tp->dev->dev_addr[5]) &
4824 TX_BACKOFF_SEED_MASK;
4825 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/*
 * Driver entry point for changing the interface MAC address: copy the
 * requested address from the sockaddr into dev->dev_addr, then
 * reprogram the hardware registers under tp->lock.
 */
4828 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4830 struct tg3 *tp = netdev_priv(dev);
4831 struct sockaddr *addr = p;
4833 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4835 spin_lock_irq(&tp->lock);
4836 __tg3_set_mac_addr(tp);
4837 spin_unlock_irq(&tp->lock);
4842 /* tp->lock is held. */
/*
 * Fill in one TG3_BDINFO ring-control block in NIC SRAM at
 * bdinfo_addr: the 64-bit host DMA address of the ring (written as
 * separate high/low 32-bit halves), the maxlen/flags word, and -- on
 * chips other than the 5705 -- the NIC-side ring address.
 */
4843 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4844 dma_addr_t mapping, u32 maxlen_flags,
4848 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4849 ((u64) mapping >> 32));
4851 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4852 ((u64) mapping & 0xffffffff));
4854 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705 has no NIC-resident descriptor copy, so skip the NIC address. */
4857 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4859 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4863 static void __tg3_set_rx_mode(struct net_device *);
4865 /* tp->lock is held. */
4866 static int tg3_reset_hw(struct tg3 *tp)
4868 u32 val, rdmac_mode;
4871 tg3_disable_ints(tp);
4875 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4877 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4878 err = tg3_abort_hw(tp);
4883 err = tg3_chip_reset(tp);
4887 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4889 /* This works around an issue with Athlon chipsets on
4890 * B3 tigon3 silicon. This bit has no effect on any
4891 * other revision. But do not set this on PCI Express
4894 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4895 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4896 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4898 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4899 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4900 val = tr32(TG3PCI_PCISTATE);
4901 val |= PCISTATE_RETRY_SAME_DMA;
4902 tw32(TG3PCI_PCISTATE, val);
4905 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4906 /* Enable some hw fixes. */
4907 val = tr32(TG3PCI_MSI_DATA);
4908 val |= (1 << 26) | (1 << 28) | (1 << 29);
4909 tw32(TG3PCI_MSI_DATA, val);
4912 /* Descriptor ring init may make accesses to the
4913 * NIC SRAM area to setup the TX descriptors, so we
4914 * can only do this after the hardware has been
4915 * successfully reset.
4919 /* This value is determined during the probe time DMA
4920 * engine test, tg3_test_dma.
4922 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4924 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4925 GRC_MODE_4X_NIC_SEND_RINGS |
4926 GRC_MODE_NO_TX_PHDR_CSUM |
4927 GRC_MODE_NO_RX_PHDR_CSUM);
4928 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4929 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4930 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4931 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4932 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4936 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4938 /* Setup the timer prescalar register. Clock is always 66Mhz. */
4939 val = tr32(GRC_MISC_CFG);
4941 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4942 tw32(GRC_MISC_CFG, val);
4944 /* Initialize MBUF/DESC pool. */
4945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4947 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4948 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4950 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4952 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4953 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4954 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4956 #if TG3_TSO_SUPPORT != 0
4957 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4960 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4961 TG3_TSO5_FW_RODATA_LEN +
4962 TG3_TSO5_FW_DATA_LEN +
4963 TG3_TSO5_FW_SBSS_LEN +
4964 TG3_TSO5_FW_BSS_LEN);
4965 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4966 tw32(BUFMGR_MB_POOL_ADDR,
4967 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4968 tw32(BUFMGR_MB_POOL_SIZE,
4969 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4973 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4974 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4975 tp->bufmgr_config.mbuf_read_dma_low_water);
4976 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4977 tp->bufmgr_config.mbuf_mac_rx_low_water);
4978 tw32(BUFMGR_MB_HIGH_WATER,
4979 tp->bufmgr_config.mbuf_high_water);
4981 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4982 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4983 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4984 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4985 tw32(BUFMGR_MB_HIGH_WATER,
4986 tp->bufmgr_config.mbuf_high_water_jumbo);
4988 tw32(BUFMGR_DMA_LOW_WATER,
4989 tp->bufmgr_config.dma_low_water);
4990 tw32(BUFMGR_DMA_HIGH_WATER,
4991 tp->bufmgr_config.dma_high_water);
4993 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4994 for (i = 0; i < 2000; i++) {
4995 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5000 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5005 /* Setup replenish threshold. */
5006 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5008 /* Initialize TG3_BDINFO's at:
5009 * RCVDBDI_STD_BD: standard eth size rx ring
5010 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5011 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5014 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5015 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5016 * ring attribute flags
5017 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5019 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5020 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5022 * The size of each ring is fixed in the firmware, but the location is
5025 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5026 ((u64) tp->rx_std_mapping >> 32));
5027 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5028 ((u64) tp->rx_std_mapping & 0xffffffff));
5029 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5030 NIC_SRAM_RX_BUFFER_DESC);
5032 /* Don't even try to program the JUMBO/MINI buffer descriptor
5035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5037 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5038 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5040 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5041 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5043 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5044 BDINFO_FLAGS_DISABLED);
5046 /* Setup replenish threshold. */
5047 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5049 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5050 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5051 ((u64) tp->rx_jumbo_mapping >> 32));
5052 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5053 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5054 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5055 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5056 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5057 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5059 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5060 BDINFO_FLAGS_DISABLED);
5065 /* There is only one send ring on 5705/5750, no need to explicitly
5066 * disable the others.
5068 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5069 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5070 /* Clear out send RCB ring in SRAM. */
5071 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5072 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5073 BDINFO_FLAGS_DISABLED);
5078 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5079 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5081 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5082 tp->tx_desc_mapping,
5083 (TG3_TX_RING_SIZE <<
5084 BDINFO_FLAGS_MAXLEN_SHIFT),
5085 NIC_SRAM_TX_BUFFER_DESC);
5087 /* There is only one receive return ring on 5705/5750, no need
5088 * to explicitly disable the others.
5090 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5092 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5093 i += TG3_BDINFO_SIZE) {
5094 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5095 BDINFO_FLAGS_DISABLED);
5100 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5102 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5104 (TG3_RX_RCB_RING_SIZE(tp) <<
5105 BDINFO_FLAGS_MAXLEN_SHIFT),
5108 tp->rx_std_ptr = tp->rx_pending;
5109 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5112 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5113 tp->rx_jumbo_pending : 0;
5114 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5117 /* Initialize MAC address and backoff seed. */
5118 __tg3_set_mac_addr(tp);
5120 /* MTU + ethernet header + FCS + optional VLAN tag */
5121 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5123 /* The slot time is changed by tg3_setup_phy if we
5124 * run at gigabit with half duplex.
5126 tw32(MAC_TX_LENGTHS,
5127 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5128 (6 << TX_LENGTHS_IPG_SHIFT) |
5129 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5131 /* Receive rules. */
5132 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5133 tw32(RCVLPC_CONFIG, 0x0181);
5135 /* Calculate RDMAC_MODE setting early, we need it to determine
5136 * the RCVLPC_STATE_ENABLE mask.
5138 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5139 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5140 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5141 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5142 RDMAC_MODE_LNGREAD_ENAB);
5143 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5144 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5145 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5146 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5147 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5148 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5149 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5150 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5151 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5152 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5153 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5154 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5158 #if TG3_TSO_SUPPORT != 0
5159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5160 rdmac_mode |= (1 << 27);
5163 /* Receive/send statistics. */
5164 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5165 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5166 val = tr32(RCVLPC_STATS_ENABLE);
5167 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5168 tw32(RCVLPC_STATS_ENABLE, val);
5170 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5172 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5173 tw32(SNDDATAI_STATSENAB, 0xffffff);
5174 tw32(SNDDATAI_STATSCTRL,
5175 (SNDDATAI_SCTRL_ENABLE |
5176 SNDDATAI_SCTRL_FASTUPD));
5178 /* Setup host coalescing engine. */
5179 tw32(HOSTCC_MODE, 0);
5180 for (i = 0; i < 2000; i++) {
5181 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5186 tw32(HOSTCC_RXCOL_TICKS, 0);
5187 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5188 tw32(HOSTCC_RXMAX_FRAMES, 1);
5189 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5190 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5191 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5192 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5193 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5195 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5196 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5198 /* set status block DMA address */
5199 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5200 ((u64) tp->status_mapping >> 32));
5201 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5202 ((u64) tp->status_mapping & 0xffffffff));
5204 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5206 /* Status/statistics block address. See tg3_timer,
5207 * the tg3_periodic_fetch_stats call there, and
5208 * tg3_get_stats to see how this works for 5705/5750 chips.
5210 tw32(HOSTCC_STAT_COAL_TICKS,
5211 DEFAULT_STAT_COAL_TICKS);
5212 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5213 ((u64) tp->stats_mapping >> 32));
5214 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5215 ((u64) tp->stats_mapping & 0xffffffff));
5216 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5217 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5220 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5222 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5223 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5224 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5225 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5226 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5228 /* Clear statistics/status block in chip, and status block in ram. */
5229 for (i = NIC_SRAM_STATS_BLK;
5230 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5232 tg3_write_mem(tp, i, 0);
5235 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5237 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5238 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5239 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5242 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5244 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5245 GRC_LCLCTRL_GPIO_OUTPUT1);
5246 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5249 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5250 tr32(MAILBOX_INTERRUPT_0);
5252 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5253 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5254 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5258 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5259 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5260 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5261 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5262 WDMAC_MODE_LNGREAD_ENAB);
5264 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5265 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5267 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5268 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5269 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5271 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5272 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5273 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5274 val |= WDMAC_MODE_RX_ACCEL;
5278 tw32_f(WDMAC_MODE, val);
5281 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5282 val = tr32(TG3PCI_X_CAPS);
5283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5284 val &= ~PCIX_CAPS_BURST_MASK;
5285 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5286 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5287 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5288 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5289 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5290 val |= (tp->split_mode_max_reqs <<
5291 PCIX_CAPS_SPLIT_SHIFT);
5293 tw32(TG3PCI_X_CAPS, val);
5296 tw32_f(RDMAC_MODE, rdmac_mode);
5299 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5300 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5301 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5302 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5303 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5304 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5305 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5306 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5307 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5308 #if TG3_TSO_SUPPORT != 0
5309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5310 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5312 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5313 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5315 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5316 err = tg3_load_5701_a0_firmware_fix(tp);
5321 #if TG3_TSO_SUPPORT != 0
5322 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5323 err = tg3_load_tso_firmware(tp);
5329 tp->tx_mode = TX_MODE_ENABLE;
5330 tw32_f(MAC_TX_MODE, tp->tx_mode);
5333 tp->rx_mode = RX_MODE_ENABLE;
5334 tw32_f(MAC_RX_MODE, tp->rx_mode);
5337 if (tp->link_config.phy_is_low_power) {
5338 tp->link_config.phy_is_low_power = 0;
5339 tp->link_config.speed = tp->link_config.orig_speed;
5340 tp->link_config.duplex = tp->link_config.orig_duplex;
5341 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5344 tp->mi_mode = MAC_MI_MODE_BASE;
5345 tw32_f(MAC_MI_MODE, tp->mi_mode);
5348 tw32(MAC_LED_CTRL, tp->led_ctrl);
5350 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5351 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5352 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5355 tw32_f(MAC_RX_MODE, tp->rx_mode);
5358 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5360 /* Set drive transmission level to 1.2V */
5361 val = tr32(MAC_SERDES_CFG);
5364 tw32(MAC_SERDES_CFG, val);
5366 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5367 tw32(MAC_SERDES_CFG, 0x616000);
5370 /* Prevent chip from dropping frames when flow control
5373 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5375 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5376 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5377 /* Use hardware link auto-negotiation */
5378 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5381 err = tg3_setup_phy(tp, 1);
5385 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5388 /* Clear CRC stats. */
5389 tg3_readphy(tp, 0x1e, &tmp);
5390 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5391 tg3_readphy(tp, 0x14, &tmp);
5394 __tg3_set_rx_mode(tp->dev);
5396 /* Initialize receive rules. */
5397 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5398 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5399 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5400 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5407 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5411 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5413 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5415 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5417 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5419 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5421 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5423 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5425 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5427 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5429 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5431 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5433 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5435 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5437 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5445 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5447 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5448 tg3_enable_ints(tp);
5453 /* Called at device open time to get the chip ready for
5454  * packet processing. Invoked with tp->lock held.
/*
 * Bring-up sequence: force the chip into full-power state, select the
 * core clocks, reset the PCI memory window base, then run the full
 * hardware reset/initialization (tg3_reset_hw).
 * Returns 0 on success, or the negative error from a failing helper.
 */
5456 static int tg3_init_hw(struct tg3 *tp)
5460 /* Force the chip into D0. */
5461 err = tg3_set_power_state(tp, 0);
/* NOTE(review): the error check after tg3_set_power_state() is not
 * visible in this excerpt -- confirm err is tested before continuing. */
5465 tg3_switch_clocks(tp);
/* Point the PCI memory window at offset 0 before register/SRAM setup. */
5467 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5469 err = tg3_reset_hw(tp);
/*
 * Accumulate a 32-bit hardware counter register REG into the 64-bit
 * software statistic PSTAT (kept as separate low/high words).  A wrap
 * of the low word is detected by the sum ending up smaller than the
 * value just added, in which case the high word is carried.
 */
5475 #define TG3_STAT_ADD32(PSTAT, REG) \
5476 do { u32 __val = tr32(REG); \
5477 (PSTAT)->low += __val; \
5478 if ((PSTAT)->low < __val) \
5479 (PSTAT)->high += 1; \
/*
 * Periodically fold the chip's 32-bit MAC TX/RX statistics registers
 * into the 64-bit counters in tp->hw_stats (see TG3_STAT_ADD32).
 * Called from the driver timer; skipped while the link is down since
 * the counters cannot advance without carrier.
 */
5482 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5484 struct tg3_hw_stats *sp = tp->hw_stats;
/* Nothing to accumulate without link. */
5486 if (!netif_carrier_ok(tp->dev))
5489 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5490 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5491 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5492 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5493 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5494 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5495 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5496 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5497 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5498 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5499 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5500 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5501 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5503 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5504 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5505 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5506 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5507 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5508 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5509 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5510 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5511 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5512 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5513 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5514 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5515 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5516 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/*
 * Driver watchdog timer, re-armed every tp->timer_offset jiffies.
 * Runs with both tp->lock (irqsave) and tp->tx_lock held.  Duties:
 *  - work around the racy non-tagged status-block protocol by forcing
 *    an interrupt when the status block shows a pending update,
 *  - detect a hung chip (write DMA engine no longer enabled) and
 *    schedule the reset task,
 *  - once per second, poll/refresh link state for configurations that
 *    need it, and periodically fetch MAC statistics on 5705/5750,
 *  - send the ASF firmware heartbeat when its counter expires.
 */
5519 static void tg3_timer(unsigned long __opaque)
5521 struct tg3 *tp = (struct tg3 *) __opaque;
5522 unsigned long flags;
5524 spin_lock_irqsave(&tp->lock, flags);
5525 spin_lock(&tp->tx_lock);
5527 /* All of this garbage is because when using non-tagged
5528  * IRQ status the mailbox/status_block protocol the chip
5529  * uses with the cpu is race prone.
/* A pending status update would otherwise be missed: kick an
 * interrupt and a host-coalescing "now" event to re-sync. */
5531 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5532 tw32(GRC_LOCAL_CTRL,
5533 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5535 tw32(HOSTCC_MODE, tp->coalesce_mode |
5536 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write DMA engine dropped its enable bit: chip is wedged.
 * Drop the locks before scheduling the reset workqueue task. */
5539 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5540 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5541 spin_unlock(&tp->tx_lock);
5542 spin_unlock_irqrestore(&tp->lock, flags);
5543 schedule_work(&tp->reset_task);
5547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5549 tg3_periodic_fetch_stats(tp);
5551 /* This part only runs once per second. */
5552 if (!--tp->timer_counter) {
5553 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5557 mac_stat = tr32(MAC_STATUS);
5560 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5561 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5563 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5567 tg3_setup_phy(tp, 0);
5568 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5569 u32 mac_stat = tr32(MAC_STATUS);
/* SERDES polling: re-run PHY setup on loss of link, or when
 * link is down but the PCS reports sync/signal detect. */
5572 if (netif_carrier_ok(tp->dev) &&
5573 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5576 if (! netif_carrier_ok(tp->dev) &&
5577 (mac_stat & (MAC_STATUS_PCS_SYNCED |
5578 MAC_STATUS_SIGNAL_DET))) {
5584 ~MAC_MODE_PORT_MODE_MASK));
5586 tw32_f(MAC_MODE, tp->mac_mode);
5588 tg3_setup_phy(tp, 0);
5592 tp->timer_counter = tp->timer_multiplier;
5595 /* Heartbeat is only sent once every 120 seconds. */
5596 if (!--tp->asf_counter) {
5597 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
/* Tell the ASF firmware the driver is alive via the
 * NIC SRAM firmware mailbox, then raise an RX CPU event. */
5600 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5601 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5602 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5603 val = tr32(GRC_RX_CPU_EVENT);
5605 tw32(GRC_RX_CPU_EVENT, val);
5607 tp->asf_counter = tp->asf_multiplier;
5610 spin_unlock(&tp->tx_lock);
5611 spin_unlock_irqrestore(&tp->lock, flags);
/* Re-arm ourselves for the next tick. */
5613 tp->timer.expires = jiffies + tp->timer_offset;
5614 add_timer(&tp->timer);
/*
 * net_device open handler.  Sequence: mask interrupts, allocate the
 * DMA-consistent rings/status/stats blocks, request the (shared) IRQ,
 * initialize the hardware, start the 10 Hz driver timer, re-enable
 * interrupts, and finally wake the transmit queue.  On failure the
 * IRQ and consistent memory are released again.
 */
5617 static int tg3_open(struct net_device *dev)
5619 struct tg3 *tp = netdev_priv(dev);
5622 spin_lock_irq(&tp->lock);
5623 spin_lock(&tp->tx_lock);
/* Keep the chip quiet during setup. */
5625 tg3_disable_ints(tp);
5626 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5628 spin_unlock(&tp->tx_lock);
5629 spin_unlock_irq(&tp->lock);
5631 /* The placement of this call is tied
5632  * to the setup and use of Host TX descriptors.
5634 err = tg3_alloc_consistent(tp);
5638 err = request_irq(dev->irq, tg3_interrupt,
5639 SA_SHIRQ, dev->name, dev);
5642 tg3_free_consistent(tp);
5646 spin_lock_irq(&tp->lock);
5647 spin_lock(&tp->tx_lock);
5649 err = tg3_init_hw(tp);
/* 10 Hz timer; link poll once/sec, ASF heartbeat every 120 s. */
5654 tp->timer_offset = HZ / 10;
5655 tp->timer_counter = tp->timer_multiplier = 10;
5656 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5658 init_timer(&tp->timer);
5659 tp->timer.expires = jiffies + tp->timer_offset;
5660 tp->timer.data = (unsigned long) tp;
5661 tp->timer.function = tg3_timer;
5662 add_timer(&tp->timer);
5664 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5667 spin_unlock(&tp->tx_lock);
5668 spin_unlock_irq(&tp->lock);
/* Error path: undo IRQ and ring allocations. */
5671 free_irq(dev->irq, dev);
5672 tg3_free_consistent(tp);
5676 spin_lock_irq(&tp->lock);
5677 spin_lock(&tp->tx_lock);
5679 tg3_enable_ints(tp);
5681 spin_unlock(&tp->tx_lock);
5682 spin_unlock_irq(&tp->lock);
5684 netif_start_queue(dev);
/*
 * Debug helper: dump a wide cross-section of chip state to the kernel
 * log -- PCI status, each control block's MODE/STATUS registers, host
 * coalescing addresses, NIC-SRAM ring control blocks, the software
 * status/statistics blocks, mailbox indices and on-chip descriptors.
 * Read-only with respect to the hardware (register/SRAM reads only);
 * intentionally left non-static for ad-hoc debugging.
 */
5690 /*static*/ void tg3_dump_state(struct tg3 *tp)
5692 u32 val32, val32_2, val32_3, val32_4, val32_5;
5696 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5697 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5698 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5702 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5703 tr32(MAC_MODE), tr32(MAC_STATUS));
5704 printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5705 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5706 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5707 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5708 printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5709 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5711 /* Send data initiator control block */
5712 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5713 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5714 printk("       SNDDATAI_STATSCTRL[%08x]\n",
5715 tr32(SNDDATAI_STATSCTRL));
5717 /* Send data completion control block */
5718 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5720 /* Send BD ring selector block */
5721 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5722 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5724 /* Send BD initiator control block */
5725 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5726 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5728 /* Send BD completion control block */
5729 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5731 /* Receive list placement control block */
5732 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5733 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5734 printk("       RCVLPC_STATSCTRL[%08x]\n",
5735 tr32(RCVLPC_STATSCTRL));
5737 /* Receive data and receive BD initiator control block */
5738 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5739 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5741 /* Receive data completion control block */
5742 printk("DEBUG: RCVDCC_MODE[%08x]\n",
5745 /* Receive BD initiator control block */
5746 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5747 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5749 /* Receive BD completion control block */
5750 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5751 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5753 /* Receive list selector control block */
5754 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5755 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5757 /* Mbuf cluster free block */
5758 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5759 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5761 /* Host coalescing control block */
5762 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5763 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5764 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5765 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5766 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5767 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5768 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5769 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5770 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5771 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5772 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5773 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5775 /* Memory arbiter control block */
5776 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5777 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5779 /* Buffer manager control block */
5780 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5781 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5782 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5783 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5784 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5785 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5786 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5787 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5789 /* Read DMA control block */
5790 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5791 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5793 /* Write DMA control block */
5794 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5795 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5797 /* DMA completion block */
5798 printk("DEBUG: DMAC_MODE[%08x]\n",
5802 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5803 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5804 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5805 tr32(GRC_LOCAL_CTRL));
/* On-chip receive BD ring control blocks (jumbo/std/mini). */
5808 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5809 tr32(RCVDBDI_JUMBO_BD + 0x0),
5810 tr32(RCVDBDI_JUMBO_BD + 0x4),
5811 tr32(RCVDBDI_JUMBO_BD + 0x8),
5812 tr32(RCVDBDI_JUMBO_BD + 0xc));
5813 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5814 tr32(RCVDBDI_STD_BD + 0x0),
5815 tr32(RCVDBDI_STD_BD + 0x4),
5816 tr32(RCVDBDI_STD_BD + 0x8),
5817 tr32(RCVDBDI_STD_BD + 0xc));
5818 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5819 tr32(RCVDBDI_MINI_BD + 0x0),
5820 tr32(RCVDBDI_MINI_BD + 0x4),
5821 tr32(RCVDBDI_MINI_BD + 0x8),
5822 tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks kept in NIC SRAM. */
5824 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5825 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5826 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5827 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5828 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5829 val32, val32_2, val32_3, val32_4);
5831 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5832 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5833 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5834 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5835 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5836 val32, val32_2, val32_3, val32_4);
5838 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5839 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5840 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5841 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5842 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5843 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5844 val32, val32_2, val32_3, val32_4, val32_5);
5846 /* SW status block */
5847 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5848 tp->hw_status->status,
5849 tp->hw_status->status_tag,
5850 tp->hw_status->rx_jumbo_consumer,
5851 tp->hw_status->rx_consumer,
5852 tp->hw_status->rx_mini_consumer,
5853 tp->hw_status->idx[0].rx_producer,
5854 tp->hw_status->idx[0].tx_consumer);
5856 /* SW statistics block */
5857 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5858 ((u32 *)tp->hw_stats)[0],
5859 ((u32 *)tp->hw_stats)[1],
5860 ((u32 *)tp->hw_stats)[2],
5861 ((u32 *)tp->hw_stats)[3]);
/* Send mailbox producer indices (host and NIC views). */
5864 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5865 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5866 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5867 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5868 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5870 /* NIC side send descriptors. */
5871 for (i = 0; i < 6; i++) {
5874 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5875 + (i * sizeof(struct tg3_tx_buffer_desc));
5876 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5878 readl(txd + 0x0), readl(txd + 0x4),
5879 readl(txd + 0x8), readl(txd + 0xc));
5882 /* NIC side RX descriptors. */
5883 for (i = 0; i < 6; i++) {
5886 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5887 + (i * sizeof(struct tg3_rx_buffer_desc));
5888 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5890 readl(rxd + 0x0), readl(rxd + 0x4),
5891 readl(rxd + 0x8), readl(rxd + 0xc));
5892 rxd += (4 * sizeof(u32));
5893 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5895 readl(rxd + 0x0), readl(rxd + 0x4),
5896 readl(rxd + 0x8), readl(rxd + 0xc));
/* NIC side jumbo RX descriptors, two 16-byte halves per entry. */
5899 for (i = 0; i < 6; i++) {
5902 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5903 + (i * sizeof(struct tg3_rx_buffer_desc));
5904 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5906 readl(rxd + 0x0), readl(rxd + 0x4),
5907 readl(rxd + 0x8), readl(rxd + 0xc));
5908 rxd += (4 * sizeof(u32));
5909 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5911 readl(rxd + 0x0), readl(rxd + 0x4),
5912 readl(rxd + 0x8), readl(rxd + 0xc));
5917 static struct net_device_stats *tg3_get_stats(struct net_device *);
5918 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/*
 * net_device stop handler.  Quiesces TX, kills the driver timer,
 * disables interrupts under the locks, clears the init/flow-control
 * flags and carrier, releases the IRQ, then snapshots the hardware
 * statistics into *_prev BEFORE freeing the consistent memory that
 * backs tp->hw_stats (the snapshots seed the counters on next open).
 */
5920 static int tg3_close(struct net_device *dev)
5922 struct tg3 *tp = netdev_priv(dev);
5924 netif_stop_queue(dev);
/* Timer must be fully stopped before tearing down state it touches. */
5926 del_timer_sync(&tp->timer);
5928 spin_lock_irq(&tp->lock);
5929 spin_lock(&tp->tx_lock);
5934 tg3_disable_ints(tp);
5939 ~(TG3_FLAG_INIT_COMPLETE |
5940 TG3_FLAG_GOT_SERDES_FLOWCTL);
5941 netif_carrier_off(tp->dev);
5943 spin_unlock(&tp->tx_lock);
5944 spin_unlock_irq(&tp->lock);
5946 free_irq(dev->irq, dev);
/* Preserve cumulative stats across close/open cycles. */
5948 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5949 sizeof(tp->net_stats_prev));
5950 memcpy(&tp->estats_prev, tg3_get_estats(tp),
5951 sizeof(tp->estats_prev));
5953 tg3_free_consistent(tp);
/*
 * Collapse a split high/low 64-bit hardware statistic into an
 * unsigned long for the stats interfaces.
 * NOTE(review): only fragments of this function are visible here;
 * presumably the BITS_PER_LONG == 32 build truncates to the low word
 * while 64-bit builds return the full combined value -- confirm
 * against the complete source.
 */
5958 static inline unsigned long get_stat64(tg3_stat64_t *val)
5962 #if (BITS_PER_LONG == 32)
5965 ret = ((u64)val->high << 32) | ((u64)val->low);
/*
 * Return the cumulative RX CRC error count.  On 5700/5701 copper
 * parts the count is read from a PHY counter (accessed through the
 * shadow-register pair 0x1e/0x14) and accumulated in software in
 * tp->phy_crc_errors; all other configurations use the MAC's FCS
 * error statistic from the hardware stats block.
 */
5970 static unsigned long calc_crc_errors(struct tg3 *tp)
5972 struct tg3_hw_stats *hw_stats = tp->hw_stats;
5974 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
5975 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5977 unsigned long flags;
/* PHY accesses must be serialized with the rest of the driver. */
5980 spin_lock_irqsave(&tp->lock, flags);
5981 tg3_readphy(tp, 0x1e, &val);
5982 tg3_writephy(tp, 0x1e, val | 0x8000);
5983 tg3_readphy(tp, 0x14, &val);
5984 spin_unlock_irqrestore(&tp->lock, flags);
/* PHY counter clears on read, so accumulate in software. */
5986 tp->phy_crc_errors += val;
5988 return tp->phy_crc_errors;
5991 return get_stat64(&hw_stats->rx_fcs_errors);
/*
 * Build one ethtool statistic: the value saved at the last close
 * (old_estats) plus the live hardware counter.  Relies on the local
 * names estats/old_estats/hw_stats in the calling function.
 */
5994 #define ESTAT_ADD(member) \
5995 estats->member = old_estats->member + \
5996 get_stat64(&hw_stats->member)
/*
 * Populate tp->estats for ethtool: every member is the pre-close
 * snapshot (tp->estats_prev) plus the current hardware counter, via
 * ESTAT_ADD.  Returns a pointer to the freshly filled tp->estats.
 */
5998 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6000 struct tg3_ethtool_stats *estats = &tp->estats;
6001 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6002 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6007 ESTAT_ADD(rx_octets);
6008 ESTAT_ADD(rx_fragments);
6009 ESTAT_ADD(rx_ucast_packets);
6010 ESTAT_ADD(rx_mcast_packets);
6011 ESTAT_ADD(rx_bcast_packets);
6012 ESTAT_ADD(rx_fcs_errors);
6013 ESTAT_ADD(rx_align_errors);
6014 ESTAT_ADD(rx_xon_pause_rcvd);
6015 ESTAT_ADD(rx_xoff_pause_rcvd);
6016 ESTAT_ADD(rx_mac_ctrl_rcvd);
6017 ESTAT_ADD(rx_xoff_entered);
6018 ESTAT_ADD(rx_frame_too_long_errors);
6019 ESTAT_ADD(rx_jabbers);
6020 ESTAT_ADD(rx_undersize_packets);
6021 ESTAT_ADD(rx_in_length_errors);
6022 ESTAT_ADD(rx_out_length_errors);
6023 ESTAT_ADD(rx_64_or_less_octet_packets);
6024 ESTAT_ADD(rx_65_to_127_octet_packets);
6025 ESTAT_ADD(rx_128_to_255_octet_packets);
6026 ESTAT_ADD(rx_256_to_511_octet_packets);
6027 ESTAT_ADD(rx_512_to_1023_octet_packets);
6028 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6029 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6030 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6031 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6032 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6034 ESTAT_ADD(tx_octets);
6035 ESTAT_ADD(tx_collisions);
6036 ESTAT_ADD(tx_xon_sent);
6037 ESTAT_ADD(tx_xoff_sent);
6038 ESTAT_ADD(tx_flow_control);
6039 ESTAT_ADD(tx_mac_errors);
6040 ESTAT_ADD(tx_single_collisions);
6041 ESTAT_ADD(tx_mult_collisions);
6042 ESTAT_ADD(tx_deferred);
6043 ESTAT_ADD(tx_excessive_collisions);
6044 ESTAT_ADD(tx_late_collisions);
6045 ESTAT_ADD(tx_collide_2times);
6046 ESTAT_ADD(tx_collide_3times);
6047 ESTAT_ADD(tx_collide_4times);
6048 ESTAT_ADD(tx_collide_5times);
6049 ESTAT_ADD(tx_collide_6times);
6050 ESTAT_ADD(tx_collide_7times);
6051 ESTAT_ADD(tx_collide_8times);
6052 ESTAT_ADD(tx_collide_9times);
6053 ESTAT_ADD(tx_collide_10times);
6054 ESTAT_ADD(tx_collide_11times);
6055 ESTAT_ADD(tx_collide_12times);
6056 ESTAT_ADD(tx_collide_13times);
6057 ESTAT_ADD(tx_collide_14times);
6058 ESTAT_ADD(tx_collide_15times);
6059 ESTAT_ADD(tx_ucast_packets);
6060 ESTAT_ADD(tx_mcast_packets);
6061 ESTAT_ADD(tx_bcast_packets);
6062 ESTAT_ADD(tx_carrier_sense_errors);
6063 ESTAT_ADD(tx_discards);
6064 ESTAT_ADD(tx_errors);
6066 ESTAT_ADD(dma_writeq_full);
6067 ESTAT_ADD(dma_write_prioq_full);
6068 ESTAT_ADD(rxbds_empty);
6069 ESTAT_ADD(rx_discards);
6070 ESTAT_ADD(rx_errors);
6071 ESTAT_ADD(rx_threshold_hit);
6073 ESTAT_ADD(dma_readq_full);
6074 ESTAT_ADD(dma_read_prioq_full);
6075 ESTAT_ADD(tx_comp_queue_full);
6077 ESTAT_ADD(ring_set_send_prod_index);
6078 ESTAT_ADD(ring_status_update);
6079 ESTAT_ADD(nic_irqs);
6080 ESTAT_ADD(nic_avoided_irqs);
6081 ESTAT_ADD(nic_tx_threshold_hit);
/*
 * net_device get_stats handler.  Each standard counter is rebuilt as
 * the pre-close snapshot (tp->net_stats_prev) plus the appropriate
 * combination of live hardware counters, so values remain cumulative
 * across close/open cycles.  Returns &tp->net_stats.
 */
6086 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6088 struct tg3 *tp = netdev_priv(dev);
6089 struct net_device_stats *stats = &tp->net_stats;
6090 struct net_device_stats *old_stats = &tp->net_stats_prev;
6091 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Packet totals are the sum of unicast/multicast/broadcast. */
6096 stats->rx_packets = old_stats->rx_packets +
6097 get_stat64(&hw_stats->rx_ucast_packets) +
6098 get_stat64(&hw_stats->rx_mcast_packets) +
6099 get_stat64(&hw_stats->rx_bcast_packets);
6101 stats->tx_packets = old_stats->tx_packets +
6102 get_stat64(&hw_stats->tx_ucast_packets) +
6103 get_stat64(&hw_stats->tx_mcast_packets) +
6104 get_stat64(&hw_stats->tx_bcast_packets);
6106 stats->rx_bytes = old_stats->rx_bytes +
6107 get_stat64(&hw_stats->rx_octets);
6108 stats->tx_bytes = old_stats->tx_bytes +
6109 get_stat64(&hw_stats->tx_octets);
6111 stats->rx_errors = old_stats->rx_errors +
6112 get_stat64(&hw_stats->rx_errors) +
6113 get_stat64(&hw_stats->rx_discards);
6114 stats->tx_errors = old_stats->tx_errors +
6115 get_stat64(&hw_stats->tx_errors) +
6116 get_stat64(&hw_stats->tx_mac_errors) +
6117 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6118 get_stat64(&hw_stats->tx_discards);
6120 stats->multicast = old_stats->multicast +
6121 get_stat64(&hw_stats->rx_mcast_packets);
6122 stats->collisions = old_stats->collisions +
6123 get_stat64(&hw_stats->tx_collisions);
6125 stats->rx_length_errors = old_stats->rx_length_errors +
6126 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6127 get_stat64(&hw_stats->rx_undersize_packets);
6129 stats->rx_over_errors = old_stats->rx_over_errors +
6130 get_stat64(&hw_stats->rxbds_empty);
6131 stats->rx_frame_errors = old_stats->rx_frame_errors +
6132 get_stat64(&hw_stats->rx_align_errors);
6133 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6134 get_stat64(&hw_stats->tx_discards);
6135 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6136 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY on 5700/5701 (see calc_crc_errors). */
6138 stats->rx_crc_errors = old_stats->rx_crc_errors +
6139 calc_crc_errors(tp);
/*
 * Bitwise CRC over buf[0..len-1], used below to hash multicast MAC
 * addresses into the MAC_HASH_REG_* filter registers.
 * NOTE(review): only the loop skeleton (per-byte outer loop, 8-bit
 * inner loop) is visible in this excerpt; presumably the standard
 * Ethernet CRC-32 -- confirm polynomial/init against full source.
 */
6144 static inline u32 calc_crc(unsigned char *buf, int len)
6152 for (j = 0; j < len; j++) {
6155 for (k = 0; k < 8; k++) {
/*
 * Program the four multicast hash filter registers to either accept
 * every multicast frame (all bits set) or reject them all (cleared).
 * Caller is expected to hold the appropriate driver locks.
 */
6169 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6171 /* accept or reject all multicast frames */
6172 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6173 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6174 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6175 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/*
 * Recompute the MAC RX mode from dev->flags and the multicast list:
 * promiscuous, all-multi, or a 128-bit hash filter built from the CRC
 * of each multicast address.  Lock-free variant -- callers (including
 * tg3_set_rx_mode below) must already hold tp->lock and tp->tx_lock.
 * Only writes MAC_RX_MODE when the computed mode actually changed.
 */
6178 static void __tg3_set_rx_mode(struct net_device *dev)
6180 struct tg3 *tp = netdev_priv(dev);
6183 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6184 RX_MODE_KEEP_VLAN_TAG);
6186 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
/* With VLAN support compiled in, tag stripping is left to the VLAN
 * layer unless ASF needs the tag preserved. */
6189 #if TG3_VLAN_TAG_USED
6191 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6192 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6194 /* By definition, VLAN is disabled always in this
6197 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6198 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6201 if (dev->flags & IFF_PROMISC) {
6202 /* Promiscuous mode. */
6203 rx_mode |= RX_MODE_PROMISC;
6204 } else if (dev->flags & IFF_ALLMULTI) {
6205 /* Accept all multicast. */
6206 tg3_set_multi (tp, 1);
6207 } else if (dev->mc_count < 1) {
6208 /* Reject all multicast. */
6209 tg3_set_multi (tp, 0);
6211 /* Accept one or more multicast(s). */
6212 struct dev_mc_list *mclist;
6214 u32 mc_filter[4] = { 0, };
/* Hash each address: CRC selects a bit in one of the four
 * 32-bit filter words (bits 5-6 pick the word).
 * NOTE(review): the masking of `bit` to its low 5 bits is not
 * visible in this excerpt -- confirm before relying on it. */
6219 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6220 i++, mclist = mclist->next) {
6222 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6224 regidx = (bit & 0x60) >> 5;
6226 mc_filter[regidx] |= (1 << bit);
6229 tw32(MAC_HASH_REG_0, mc_filter[0]);
6230 tw32(MAC_HASH_REG_1, mc_filter[1]);
6231 tw32(MAC_HASH_REG_2, mc_filter[2]);
6232 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Avoid a register write (with flush) when nothing changed. */
6235 if (rx_mode != tp->rx_mode) {
6236 tp->rx_mode = rx_mode;
6237 tw32_f(MAC_RX_MODE, rx_mode);
/*
 * net_device set_multicast_list handler: take the driver locks and
 * delegate to __tg3_set_rx_mode(), which does the actual work.
 */
6242 static void tg3_set_rx_mode(struct net_device *dev)
6244 struct tg3 *tp = netdev_priv(dev);
6246 spin_lock_irq(&tp->lock);
6247 spin_lock(&tp->tx_lock);
6248 __tg3_set_rx_mode(dev);
6249 spin_unlock(&tp->tx_lock);
6250 spin_unlock_irq(&tp->lock);
/* Size of the ethtool register dump produced by tg3_get_regs(). */
6253 #define TG3_REGDUMP_LEN (32 * 1024)
/* ethtool get_regs_len handler: fixed-size dump. */
6255 static int tg3_get_regs_len(struct net_device *dev)
6257 return TG3_REGDUMP_LEN;
/*
 * ethtool get_regs handler: copy selected register ranges into the
 * caller's TG3_REGDUMP_LEN buffer, each range placed at its own
 * register offset within the buffer (unread gaps stay zeroed by the
 * initial memset).  Runs under tp->lock/tx_lock so the dump is
 * consistent with driver activity.
 */
6260 static void tg3_get_regs(struct net_device *dev,
6261 struct ethtool_regs *regs, void *_p)
6264 struct tg3 *tp = netdev_priv(dev);
6270 memset(p, 0, TG3_REGDUMP_LEN);
6272 spin_lock_irq(&tp->lock);
6273 spin_lock(&tp->tx_lock);
/* Helpers: read one register into the dump at its natural offset,
 * or a whole [base, base+len) range of 32-bit registers. */
6275 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
6276 #define GET_REG32_LOOP(base,len) \
6277 do { p = (u32 *)(orig_p + (base)); \
6278 for (i = 0; i < len; i += 4) \
6279 __GET_REG32((base) + i); \
6281 #define GET_REG32_1(reg) \
6282 do { p = (u32 *)(orig_p + (reg)); \
6283 __GET_REG32((reg)); \
6286 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6287 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6288 GET_REG32_LOOP(MAC_MODE, 0x4f0);
6289 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6290 GET_REG32_1(SNDDATAC_MODE);
6291 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6292 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6293 GET_REG32_1(SNDBDC_MODE);
6294 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6295 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6296 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6297 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6298 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6299 GET_REG32_1(RCVDCC_MODE);
6300 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6301 GET_REG32_LOOP(RCVCC_MODE, 0x14);
6302 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6303 GET_REG32_1(MBFREE_MODE);
6304 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6305 GET_REG32_LOOP(MEMARB_MODE, 0x10);
6306 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6307 GET_REG32_LOOP(RDMAC_MODE, 0x08);
6308 GET_REG32_LOOP(WDMAC_MODE, 0x08);
6309 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6310 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6311 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6312 GET_REG32_LOOP(FTQ_RESET, 0x120);
6313 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6314 GET_REG32_1(DMAC_MODE);
6315 GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers are only meaningful when flash is present. */
6316 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6317 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6320 #undef GET_REG32_LOOP
6323 spin_unlock(&tp->tx_lock);
6324 spin_unlock_irq(&tp->lock);
/* ethtool get_eeprom_len handler: report the EEPROM chip size. */
6327 static int tg3_get_eeprom_len(struct net_device *dev)
6329 return EEPROM_CHIP_SIZE;
6332 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6333 u32 offset, u32 *val);
/*
 * ethtool get_eeprom handler: copy eeprom->len bytes starting at
 * eeprom->offset into `data`.  The EEPROM is accessed a 32-bit word
 * at a time, so the read is split into an unaligned head, whole
 * 4-byte words, and an unaligned tail; eeprom->len is advanced as
 * bytes are delivered.  Returns 0 or the error from the NVRAM reader.
 */
6334 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
/* Consistency fix: use the netdev_priv() accessor like the rest of
 * this driver instead of reaching into dev->priv directly. */
6336 struct tg3 *tp = netdev_priv(dev);
6339 u32 i, offset, len, val, b_offset, b_count;
6341 offset = eeprom->offset;
/* Magic word lives at offset 0; byte-swapped for ethtool. */
6345 ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6348 eeprom->magic = swab32(eeprom->magic);
6351 /* adjustments to start on required 4 byte boundary */
6352 b_offset = offset & 3;
6353 b_count = 4 - b_offset;
6354 if (b_count > len) {
6355 /* i.e. offset=1 len=2 */
6358 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6361 memcpy(data, ((char*)&val) + b_offset, b_count);
6364 eeprom->len += b_count;
6367 /* read bytes upto the last 4 byte boundary */
6368 pd = &data[eeprom->len];
6369 for (i = 0; i < (len - (len & 3)); i += 4) {
6370 ret = tg3_nvram_read_using_eeprom(tp, offset + i,
/* read last bytes not ending on 4 byte boundary */
6381 pd = &data[eeprom->len];
6383 b_offset = offset + len - b_count;
6384 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6387 memcpy(pd, ((char*)&val), b_count);
6388 eeprom->len += b_count;
/*
 * ethtool get_settings handler: report supported/advertised link
 * modes and the active speed/duplex/autoneg state.  Refused while
 * the device is uninitialized or the PHY is in low-power mode.
 */
6393 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6395 struct tg3 *tp = netdev_priv(dev);
6397 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6398 tp->link_config.phy_is_low_power)
6401 cmd->supported = (SUPPORTED_Autoneg);
/* Gigabit modes unless the part is 10/100-only. */
6403 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6404 cmd->supported |= (SUPPORTED_1000baseT_Half |
6405 SUPPORTED_1000baseT_Full);
/* Copper PHYs add 10/100 and TP; SERDES reports fibre instead. */
6407 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6408 cmd->supported |= (SUPPORTED_100baseT_Half |
6409 SUPPORTED_100baseT_Full |
6410 SUPPORTED_10baseT_Half |
6411 SUPPORTED_10baseT_Full |
6414 cmd->supported |= SUPPORTED_FIBRE;
6416 cmd->advertising = tp->link_config.advertising;
6417 cmd->speed = tp->link_config.active_speed;
6418 cmd->duplex = tp->link_config.active_duplex;
6420 cmd->phy_address = PHY_ADDR;
6421 cmd->transceiver = 0;
6422 cmd->autoneg = tp->link_config.autoneg;
/*
 * ethtool set_settings handler: validate the requested link
 * configuration (SERDES parts only accept 1000 Mb autoneg
 * advertisements), store it in tp->link_config under the locks, and
 * re-run PHY setup to apply it.
 */
6428 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6430 struct tg3 *tp = netdev_priv(dev);
6432 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6433 tp->link_config.phy_is_low_power)
6436 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6437 /* These are the only valid advertisement bits allowed. */
6438 if (cmd->autoneg == AUTONEG_ENABLE &&
6439 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6440 ADVERTISED_1000baseT_Full |
6441 ADVERTISED_Autoneg |
6446 spin_lock_irq(&tp->lock);
6447 spin_lock(&tp->tx_lock);
6449 tp->link_config.autoneg = cmd->autoneg;
/* Autoneg uses the advertised mask; forced mode pins speed/duplex. */
6450 if (cmd->autoneg == AUTONEG_ENABLE) {
6451 tp->link_config.advertising = cmd->advertising;
6452 tp->link_config.speed = SPEED_INVALID;
6453 tp->link_config.duplex = DUPLEX_INVALID;
6455 tp->link_config.advertising = 0;
6456 tp->link_config.speed = cmd->speed;
6457 tp->link_config.duplex = cmd->duplex;
6460 tg3_setup_phy(tp, 1);
6461 spin_unlock(&tp->tx_lock);
6462 spin_unlock_irq(&tp->lock);
/* ethtool get_drvinfo handler: driver name, version and PCI bus id. */
6467 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6469 struct tg3 *tp = netdev_priv(dev);
6471 strcpy(info->driver, DRV_MODULE_NAME);
6472 strcpy(info->version, DRV_MODULE_VERSION);
6473 strcpy(info->bus_info, pci_name(tp->pdev));
/*
 * ethtool get_wol handler: only magic-packet wake is supported;
 * report whether it is currently enabled.  No SecureOn password.
 */
6476 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6478 struct tg3 *tp = netdev_priv(dev);
6480 wol->supported = WAKE_MAGIC;
6482 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6483 wol->wolopts = WAKE_MAGIC;
6484 memset(&wol->sopass, 0, sizeof(wol->sopass));
/*
 * ethtool set_wol handler: accept only WAKE_MAGIC, and refuse it on
 * SERDES parts that lack WoL capability; toggle TG3_FLAG_WOL_ENABLE
 * under tp->lock accordingly.
 */
6487 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6489 struct tg3 *tp = netdev_priv(dev);
6491 if (wol->wolopts & ~WAKE_MAGIC)
6493 if ((wol->wolopts & WAKE_MAGIC) &&
6494 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6495 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6498 spin_lock_irq(&tp->lock);
6499 if (wol->wolopts & WAKE_MAGIC)
6500 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6502 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6503 spin_unlock_irq(&tp->lock);
/* ethtool get_msglevel handler: return the driver's message mask. */
6508 static u32 tg3_get_msglevel(struct net_device *dev)
6510 struct tg3 *tp = netdev_priv(dev);
6511 return tp->msg_enable;
6514 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6516 struct tg3 *tp = netdev_priv(dev);
6517 tp->msg_enable = value;
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso: forward to the generic helper, but refuse to turn
 * TSO on when the chip is not TSO-capable.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
	return ethtool_op_set_tso(dev, value);
/* ethtool ->nway_reset: restart autonegotiation, but only if the PHY
 * currently has autoneg enabled in BMCR.
 */
static int tg3_nway_reset(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	/* BMCR is read twice on purpose — presumably to flush a stale
	 * first read from the PHY; TODO confirm against tg3_readphy().
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
	spin_unlock_irq(&tp->lock);
/* ethtool ->get_ringparam: report hardware ring limits and the currently
 * configured RX / RX-jumbo / TX ring sizes.  No mini ring on this chip.
 */
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	ering->tx_pending = tp->tx_pending;
/* ethtool ->set_ringparam: validate the requested ring sizes against the
 * hardware maxima, then apply them under both driver locks and restart
 * the interface.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
	struct tg3 *tp = netdev_priv(dev);

	/* Reject sizes the hardware rings cannot hold. */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	tg3_netif_start(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
/* ethtool ->get_pauseparam: report flow-control autoneg and the current
 * RX/TX pause settings from the driver flag word.
 */
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
	epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
	epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool ->set_pauseparam: translate the requested pause configuration
 * into driver flags under both locks, then restart the interface.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	tg3_netif_start(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
6634 static u32 tg3_get_rx_csum(struct net_device *dev)
6636 struct tg3 *tp = netdev_priv(dev);
6637 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool ->set_rx_csum: toggle RX checksum offload, unless the chip
 * revision has broken checksumming (then only "off" is accepted).
 */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {

	spin_lock_irq(&tp->lock);
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_irq(&tp->lock);
/* ethtool ->set_tx_csum: toggle NETIF_F_IP_CSUM in dev->features, unless
 * the chip revision has broken checksumming (then only "off" is accepted).
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {

		dev->features |= NETIF_F_IP_CSUM;
		dev->features &= ~NETIF_F_IP_CSUM;
6678 static int tg3_get_stats_count (struct net_device *dev)
6680 return TG3_NUM_STATS;
6683 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6685 switch (stringset) {
6687 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
6690 WARN_ON(1); /* we need a WARN() */
6695 static void tg3_get_ethtool_stats (struct net_device *dev,
6696 struct ethtool_stats *estats, u64 *tmp_stats)
6698 struct tg3 *tp = dev->priv;
6699 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* ->do_ioctl: MII ioctls.  Reads/writes go through tg3_readphy/tg3_writephy
 * under tp->lock; SERDES boards have no PHY and bail out of the MII cases.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);

	/* presumably under case SIOCGMIIPHY — TODO confirm */
	data->phy_id = PHY_ADDR;

	/* presumably under case SIOCGMIIREG — TODO confirm */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		break;			/* We have no PHY */

	spin_lock_irq(&tp->lock);
	err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
	spin_unlock_irq(&tp->lock);

	data->val_out = mii_regval;

	/* presumably under case SIOCSMIIREG — TODO confirm */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		break;			/* We have no PHY */

	/* Writing PHY registers requires admin capability. */
	if (!capable(CAP_NET_ADMIN))

	spin_lock_irq(&tp->lock);
	err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
	spin_unlock_irq(&tp->lock);
#if TG3_VLAN_TAG_USED
/* VLAN ->vlan_rx_register: record the new vlan group and refresh the
 * RX_MODE_KEEP_VLAN_TAG setting, all under both driver locks.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
/* VLAN ->vlan_rx_kill_vid: drop the per-vid device pointer from the vlan
 * group, under both driver locks.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
/* ethtool operations table; TSO entries are compiled in only when the
 * driver is built with TSO support.
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
	.get_strings		= tg3_get_strings,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe/initialize NVRAM access: reset the EEPROM state machine, enable
 * seeprom access, then on non-5700/5701 parts read NVRAM_CFG1 to decide
 * whether flash (possibly buffered) or legacy seeprom is present, setting
 * TG3_FLAG_NVRAM / TG3_FLAG_NVRAM_BUFFERED accordingly.  Sun 570X parts
 * are skipped entirely.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* XXX schedule_timeout() ... */
	for (j = 0; j < 100; j++)

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		/* 5750 gates NVRAM behind an explicit access-enable bit. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
			u32 nvaccess = tr32(NVRAM_ACCESS);

			tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);

		nvcfg1 = tr32(NVRAM_CFG1);

		tp->tg3_flags |= TG3_FLAG_NVRAM;
		if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
			if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
			u32 nvaccess = tr32(NVRAM_ACCESS);

			tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);

		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
/* Read one 32-bit word from the legacy seeprom through the GRC EEPROM
 * state machine: program the address with READ|START, poll (bounded) for
 * EEPROM_ADDR_COMPLETE, then fetch the word from GRC_EEPROM_DATA.
 */
static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
						 u32 offset, u32 *val)
	/* Offset must fit the address field and be word aligned. */
	if (offset > EEPROM_ADDR_ADDR_MASK ||

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
	tw32(GRC_EEPROM_ADDR,
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Bounded poll for completion. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
	if (!(tmp & EEPROM_ADDR_COMPLETE))

	*val = tr32(GRC_EEPROM_DATA);
/* Read one 32-bit word from NVRAM.  Falls back to the seeprom path when
 * no NVRAM interface was detected; for buffered flash the linear offset
 * is first converted to a page/offset address.  5750 parts need the
 * NVRAM access-enable bit toggled around the operation.
 */
static int __devinit tg3_nvram_read(struct tg3 *tp,
				    u32 offset, u32 *val)
	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Buffered flash addresses by page number + offset within page. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
		offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
			  NVRAM_BUFFERED_PAGE_POS) +
			 (offset % NVRAM_BUFFERED_PAGE_SIZE);

	if (offset > NVRAM_ADDR_MSK)

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);

	tw32(NVRAM_ADDR, offset);
	      NVRAM_CMD_RD | NVRAM_CMD_GO |
	      NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Wait for done bit to clear. */
	for (i = 0; i < 1000; i++) {
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Data register is the opposite endianness. */
			*val = swab32(tr32(NVRAM_RDDATA));

	tg3_nvram_unlock(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY ID
 * expected on that board; see lookup_by_subsys().  The phy_id member is
 * declared outside this extract — confirmed by the p->phy_id use below.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
/* Hard-coded board table used when neither the hardware PHY ID nor the
 * eeprom signature identifies the PHY.  A phy_id of 0 marks SERDES
 * (fiber) boards with no copper PHY.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Find the subsys_id_to_phy_id[] entry matching this device's PCI
 * subsystem vendor/device IDs, or fall through (no match) otherwise.
 */
static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
/* Identify the PHY.  Preference order: the PHY's own MII ID registers
 * (skipped when ASF firmware may be using the MII bus), then the PHY ID
 * recorded in NIC SRAM when the eeprom signature is valid, then the
 * hard-coded subsystem-ID table.  Along the way, the SRAM config words
 * also determine SERDES vs copper, LED mode, ASF, WoL and EEPROM
 * write-protect flags.  For copper PHYs without ASF, advertising is
 * (re)programmed and autoneg restarted if the link is down.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
	u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int eeprom_signature_found, eeprom_phy_serdes, err;

	tp->phy_id = PHY_ID_INVALID;
	eeprom_phy_id = PHY_ID_INVALID;
	eeprom_phy_serdes = 0;
	eeprom_signature_found = 0;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, cfg2;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		eeprom_signature_found = 1;

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM encoding into driver PHY_ID form. */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;

		/* 5750 keeps extended LED bits in the second config word. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
			led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		/* Dell 5700/5701 boards want the PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &cfg2);
		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		if (eeprom_signature_found) {
			tp->phy_id = eeprom_phy_id;
			if (eeprom_phy_serdes)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);

			tp->phy_id = p->phy_id;
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;

	/* Copper PHY without ASF: program advertising and kick autoneg
	 * if the link is currently down.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		tg3_readphy(tp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS)
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);

		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must force master mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

	/* BCM5401 needs its DSP initialized. */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);

	/* Without an eeprom signature, default to LED mode PHY_1. */
	if (!eeprom_signature_found)
		tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);
/* Read the board part number from the PCI VPD area in NVRAM (256 bytes
 * at offset 0x100), then walk the VPD resource tags looking for the "PN"
 * keyword.  Falls back to "none"; Sun 570X parts have no VPD at all.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
	unsigned char vpd_data[256];

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		/* Sun decided not to put the necessary bits in the
		 * NVRAM of their onboard tg3 parts :(
		 */
		strcpy(tp->board_part_number, "Sun 570X");

	/* Pull the whole VPD image out of NVRAM, a word at a time. */
	for (i = 0; i < 256; i += 4) {
		if (tg3_nvram_read(tp, 0x100 + i, &tmp))

		vpd_data[i + 0] = ((tmp >>  0) & 0xff);
		vpd_data[i + 1] = ((tmp >>  8) & 0xff);
		vpd_data[i + 2] = ((tmp >> 16) & 0xff);
		vpd_data[i + 3] = ((tmp >> 24) & 0xff);

	/* Now parse and find the part number. */
	for (i = 0; i < 256; ) {
		unsigned char val = vpd_data[i];

		/* 0x82 = identifier string tag, 0x91 = read-write data tag;
		 * both carry a 16-bit little-endian length to skip over.
		 */
		if (val == 0x82 || val == 0x91) {
			      (vpd_data[i + 2] << 8)));

		block_end = (i + 3 +
			     (vpd_data[i + 2] << 8)));

		while (i < block_end) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				/* board_part_number buffer limit. */
				if (partno_len > 24)

				memcpy(tp->board_part_number,

	/* Part number not found. */

	strcpy(tp->board_part_number, "none");
#ifdef CONFIG_SPARC64
/* Detect Sun onboard 570X parts by asking OpenBoot PROM for the
 * subsystem-vendor-id property and comparing it against Sun's PCI
 * vendor ID.
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	int node = pcp->prom_node;

	err = prom_getproperty(node, "subsystem-vendor-id",
			       (char *) &venid, sizeof(venid));
	/* 0 / -1 mean the property is absent or unreadable. */
	if (err == 0 || err == -1)
	if (venid == PCI_VENDOR_ID_SUN)
/* One-time probe-time discovery of chip revision, bus mode (PCI/PCI-X/
 * PCI Express), errata workarounds and feature flags.  Must run before
 * any other MMIO access: it sets up MISC_HOST_CTRL indirect-write enable
 * and decides the PCI-X target workaround.  Ends by probing the PHY and
 * reading the board part number.
 */
static int __devinit tg3_get_invariants(struct tg3 *tp)
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;

#ifdef CONFIG_SPARC64
	if (tg3_is_sun_570X(tp))
		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;

	/* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_find_device(PCI_VENDOR_ID_INTEL,
			    PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
	    pci_find_device(PCI_VENDOR_ID_INTEL,
			    PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
	    pci_find_device(PCI_VENDOR_ID_INTEL,
			    PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
	    pci_find_device(PCI_VENDOR_ID_INTEL,
			    PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
	    pci_find_device(PCI_VENDOR_ID_AMD,
			    PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,

	/* Unpack the cacheline-size config dword. */
	tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
	tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
	tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;

	if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

	/* 5703 wants a minimum PCI latency timer of 64. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,

	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Back to back register writes can cause problems on this chip,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs.  See tg3_write_indirect_reg32().
	 *
	 * PCI Express 5750_A0 rev chips need this workaround too.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	     tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
		tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, 0);
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));

	/* 5700 B0 chips do not support checksumming correctly due
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
	tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN)
		tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;

	/* Only 5701 and later support tagged irq status mode.
	 * Also, 5788 chips cannot use tagged irq status.
	 *
	 * However, since we are using NAPI avoid tagged irq status
	 * because the interrupt condition is more difficult to
	 * fully clear in that mode.
	 */
	tp->coalesce_mode = 0;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Broadcom's driver says that CIOBE multisplit has a bug */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */

	tg3_read_partno(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
		tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;

	/* It seems all chips can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	tp->dev->hard_start_xmit = tg3_start_xmit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)

	/* By default, disable wake-on-lan.  User can change this
	 * using ETHTOOL_SWOL.
	 */
	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
#ifdef CONFIG_SPARC64
/* On sparc64, try the OpenBoot PROM "local-mac-address" property for the
 * device's MAC address before touching NVRAM.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	int node = pcp->prom_node;

	if (prom_getproplen(node, "local-mac-address") == 6) {
		prom_getproperty(node, "local-mac-address",
/* Last-resort sparc64 fallback: use the system IDPROM ethernet address. */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the device MAC address.  Preference order: sparc64 PROM,
 * then the NIC SRAM MAC address mailbox (tagged "HK" = 0x484b), then
 * NVRAM, and finally the live MAC_ADDR_0 registers; on sparc64 an
 * invalid result falls back to the system IDPROM.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))

	/* NOTE(review): TG3_FLG2_SUN_570X is a tg3_flags2 bit (see its
	 * other uses in this file) but here and below it is tested
	 * against tp->tg3_flags — looks like a flags/flags2 mixup;
	 * confirm and switch to tp->tg3_flags2.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
		/* Second MAC function uses a different NVRAM offset. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
			tg3_nvram_unlock(tp);

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;
	/* Next, try NVRAM. */
	else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
		 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
		dev->dev_addr[0] = ((hi >> 16) & 0xff);
		dev->dev_addr[1] = ((hi >> 24) & 0xff);
		dev->dev_addr[2] = ((lo >>  0) & 0xff);
		dev->dev_addr[3] = ((lo >>  8) & 0xff);
		dev->dev_addr[4] = ((lo >> 16) & 0xff);
		dev->dev_addr[5] = ((lo >> 24) & 0xff);
	/* Finally just fetch it out of the MAC control regs. */
		hi = tr32(MAC_ADDR_0_HIGH);
		lo = tr32(MAC_ADDR_0_LOW);

		dev->dev_addr[5] = lo & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[1] = hi & 0xff;
		dev->dev_addr[0] = (hi >> 8) & 0xff;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
/* Run one DMA transaction through the chip's internal DMA engine:
 * build an internal buffer descriptor for buf/buf_dma, write it into
 * NIC SRAM through the PCI config-space memory window, enqueue it on
 * the high-priority read or write FTQ (to_device selects direction),
 * then poll the matching completion FIFO.
 * NOTE(review): the listing elides the timeout/return logic and the
 * branches that select read vs. write descriptor fields.
 */
7773 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7775 struct tg3_internal_buffer_desc test_desc;
7779 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Clear completion FIFOs and DMA status before starting. */
7781 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7782 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7783 tw32(RDMAC_STATUS, 0);
7784 tw32(WDMAC_STATUS, 0);
7786 tw32(BUFMGR_MODE, 0);
/* Descriptor points at the host buffer; 0x2100 is the NIC-side
 * SRAM mbuf address used by the test. */
7789 test_desc.addr_hi = ((u64) buf_dma) >> 32;
7790 test_desc.addr_lo = buf_dma & 0xffffffff;
7791 test_desc.nic_mbuf = 0x00002100;
7792 test_desc.len = size;
7795 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
7796 * the *second* time the tg3 driver was getting loaded after an
7799 * Broadcom tells me:
7800 * ...the DMA engine is connected to the GRC block and a DMA
7801 * reset may affect the GRC block in some unpredictable way...
7802 * The behavior of resets to individual blocks has not been tested.
7804 * Broadcom noted the GRC reset will also reset all sub-components.
/* cqid/sqid pairs select the completion and submission queues for
 * the read-DMA vs. write-DMA direction. */
7807 test_desc.cqid_sqid = (13 << 8) | 2;
7809 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7812 test_desc.cqid_sqid = (16 << 8) | 7;
7814 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7817 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM via the PCI
 * config-space memory window (indirect access). */
7819 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7822 val = *(((u32 *)&test_desc) + i);
7823 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7824 sram_dma_descs + (i * sizeof(u32)));
7825 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7827 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the descriptor onto the appropriate FTQ to start the DMA. */
7830 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7832 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded to 40 iterations) for our descriptor to appear on
 * the completion FIFO. */
7836 for (i = 0; i < 40; i++) {
7840 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7842 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7843 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the host-side DMA test buffer (1 KiB). */
7854 #define TEST_BUFFER_SIZE 0x400
/* Probe-time DMA engine test: tune tp->dma_rwctrl (PCI read/write
 * command codes, write boundary, watermarks) for the bus type and
 * cache line size, then verify a write-to-card / read-back round trip
 * through tg3_do_test_dma().  Returns 0 on success.
 * NOTE(review): the listing elides many lines (switch cases, else
 * branches, retry loop, closing braces); comments cover only what is
 * visible here.
 */
7856 static int __devinit tg3_test_dma(struct tg3 *tp)
7862 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Baseline PCI read/write command codes for DMA cycles. */
7868 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7869 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; 0 is treated as
 * a 1024-byte line below. */
7875 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7878 cacheline_size = 1024;
7880 cacheline_size = (int) byte * 4;
/* Pick a DMA write boundary appropriate for the bus flavor
 * (PCI vs. PCI-X vs. PCI Express) and cache line size. */
7882 switch (cacheline_size) {
7887 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7888 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7890 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7892 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7894 ~(DMA_RWCTRL_PCI_WRITE_CMD);
7896 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7901 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7902 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7904 DMA_RWCTRL_WRITE_BNDRY_256;
7905 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7907 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
/* Per-bus watermark bits (magic values from Broadcom). */
7912 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7913 tp->dma_rwctrl |= 0x001f0000;
7914 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7917 tp->dma_rwctrl |= 0x003f0000;
7919 tp->dma_rwctrl |= 0x003f000f;
7921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Core clock divider; 0x6/0x7 need the ONE_DMA workaround. */
7923 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7925 if (ccval == 0x6 || ccval == 0x7)
7926 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7928 /* Set bit 23 to re-enable PCIX hw bug fix */
7929 tp->dma_rwctrl |= 0x009f0000;
7931 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: low nibble must be cleared. */
7935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7937 tp->dma_rwctrl &= 0xfffffff0;
7939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7940 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7941 /* Remove this if it causes problems for some boards. */
7942 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7944 /* On 5700/5701 chips, we need to set this bit.
7945 * Otherwise the chip will issue cacheline transactions
7946 * to streamable DMA memory with not all the byte
7947 * enables turned on. This is an error on several
7948 * RISC PCI controllers, in particular sparc64.
7950 * On 5703/5704 chips, this bit has been reassigned
7951 * a different meaning. In particular, it is used
7952 * on those chips to enable a PCI-X workaround.
7954 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7957 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7960 /* Unneeded, already done by tg3_get_invariants. */
7961 tg3_switch_clocks(tp);
/* Only 5700/5701 actually need the round-trip test below. */
7965 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7966 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
/* Fill the buffer with a known pattern (fill elided in listing). */
7972 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7975 /* Send the buffer to the chip. */
7976 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7978 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7983 /* validate data reached card RAM correctly. */
7984 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7986 tg3_read_mem(tp, 0x2100 + (i*4), &val);
7987 if (le32_to_cpu(val) != p[i]) {
7988 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
7989 /* ret = -ENODEV here? */
7994 /* Now read it back. */
7995 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7997 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8003 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* On mismatch with boundary disabled, retry with a 16-byte
 * write boundary before declaring failure. */
8007 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8008 DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8009 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8010 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8013 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop completed without mismatch => success. */
8019 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8027 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Initialize tp->link_config to its probe-time defaults: advertise all
 * 10/100/1000 modes with autonegotiation, mark speed/duplex unknown,
 * and start with the carrier off.
 */
8032 static void __devinit tg3_init_link_config(struct tg3 *tp)
8034 tp->link_config.advertising =
8035 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8036 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8037 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8038 ADVERTISED_Autoneg | ADVERTISED_MII);
8039 tp->link_config.speed = SPEED_INVALID;
8040 tp->link_config.duplex = DUPLEX_INVALID;
8041 tp->link_config.autoneg = AUTONEG_ENABLE;
/* No link yet: report carrier off until the PHY negotiates. */
8042 netif_carrier_off(tp->dev);
8043 tp->link_config.active_speed = SPEED_INVALID;
8044 tp->link_config.active_duplex = DUPLEX_INVALID;
8045 tp->link_config.phy_is_low_power = 0;
/* orig_* fields record pre-suspend settings; invalid until first use. */
8046 tp->link_config.orig_speed = SPEED_INVALID;
8047 tp->link_config.orig_duplex = DUPLEX_INVALID;
8048 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Set default buffer-manager watermarks (standard and jumbo mbuf
 * pools, plus the DMA descriptor pool).  The 5705/5750-specific
 * values are overridden later in tg3_init_one().
 */
8051 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8053 tp->bufmgr_config.mbuf_read_dma_low_water =
8054 DEFAULT_MB_RDMA_LOW_WATER;
8055 tp->bufmgr_config.mbuf_mac_rx_low_water =
8056 DEFAULT_MB_MACRX_LOW_WATER;
8057 tp->bufmgr_config.mbuf_high_water =
8058 DEFAULT_MB_HIGH_WATER;
/* Separate watermarks apply when jumbo frames are in use. */
8060 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8061 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8062 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8063 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8064 tp->bufmgr_config.mbuf_high_water_jumbo =
8065 DEFAULT_MB_HIGH_WATER_JUMBO;
8067 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8068 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id to a human-readable name for the probe
 * banner.  A zero id means a SERDES interface with no PHY.
 */
8071 static char * __devinit tg3_phy_string(struct tg3 *tp)
8073 switch (tp->phy_id & PHY_ID_MASK) {
8074 case PHY_ID_BCM5400: return "5400";
8075 case PHY_ID_BCM5401: return "5401";
8076 case PHY_ID_BCM5411: return "5411";
8077 case PHY_ID_BCM5701: return "5701";
8078 case PHY_ID_BCM5703: return "5703";
8079 case PHY_ID_BCM5704: return "5704";
8080 case PHY_ID_BCM5705: return "5705";
8081 case PHY_ID_BCM5750: return "5750";
8082 case PHY_ID_BCM8002: return "8002/serdes";
8083 case 0: return "serdes";
8084 default: return "unknown";
/* The 5704 is a dual-port chip exposed as two PCI functions on the
 * same device number.  Scan the other functions of our slot to find
 * the peer; some operations must be coordinated between the pair.
 * NOTE(review): the listing elides the pci_dev_put/return tail.
 */
8088 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8090 struct pci_dev *peer;
/* devnr: our devfn with the function bits masked off. */
8091 unsigned int func, devnr = tp->pdev->devfn & ~7;
8093 for (func = 0; func < 8; func++) {
8094 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8095 if (peer && peer != tp->pdev)
8099 if (!peer || peer == tp->pdev)
8103 * We don't need to keep the refcount elevated; there's no way
8104 * to remove one half of this device without removing the other
/* PCI probe entry point: enable and map the device, allocate and wire
 * up the net_device, read chip invariants and the MAC address, run the
 * DMA engine test, and register with the network stack.
 * Returns 0 on success or a negative errno, unwinding via the
 * err_out_* labels.
 * NOTE(review): this listing elides many lines (error-branch bodies,
 * closing braces, some assignments); comments cover visible code only.
 */
8111 static int __devinit tg3_init_one(struct pci_dev *pdev,
8112 const struct pci_device_id *ent)
/* Print the driver version banner only on the first probed device. */
8114 static int tg3_version_printed = 0;
8115 unsigned long tg3reg_base, tg3reg_len;
8116 struct net_device *dev;
8118 int i, err, pci_using_dac, pm_cap;
8120 if (tg3_version_printed++ == 0)
8121 printk(KERN_INFO "%s", version);
8123 err = pci_enable_device(pdev);
8125 printk(KERN_ERR PFX "Cannot enable PCI device, "
/* BAR 0 must be a memory resource (the register window). */
8130 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8131 printk(KERN_ERR PFX "Cannot find proper PCI device "
8132 "base address, aborting.\n");
8134 goto err_out_disable_pdev;
8137 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8139 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8141 goto err_out_disable_pdev;
8144 pci_set_master(pdev);
8146 /* Find power-management capability. */
8147 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8149 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8152 goto err_out_free_res;
8155 /* Configure DMA attributes. */
/* Try 64-bit DMA first; fall back to 32-bit masks below. */
8156 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8159 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8161 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8162 "for consistent allocations\n");
8163 goto err_out_free_res;
8166 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8168 printk(KERN_ERR PFX "No usable DMA configuration, "
8170 goto err_out_free_res;
8175 tg3reg_base = pci_resource_start(pdev, 0);
8176 tg3reg_len = pci_resource_len(pdev, 0);
/* net_device with struct tg3 as private area. */
8178 dev = alloc_etherdev(sizeof(*tp));
8180 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8182 goto err_out_free_res;
8185 SET_MODULE_OWNER(dev);
8186 SET_NETDEV_DEV(dev, &pdev->dev);
8189 dev->features |= NETIF_F_HIGHDMA;
8190 dev->features |= NETIF_F_LLTX;
8191 #if TG3_VLAN_TAG_USED
/* Hardware VLAN tag insertion/stripping hooks. */
8192 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8193 dev->vlan_rx_register = tg3_vlan_rx_register;
8194 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8197 tp = netdev_priv(dev);
8200 tp->pm_cap = pm_cap;
8201 tp->mac_mode = TG3_DEF_MAC_MODE;
8202 tp->rx_mode = TG3_DEF_RX_MODE;
8203 tp->tx_mode = TG3_DEF_TX_MODE;
8204 tp->mi_mode = MAC_MI_MODE_BASE;
/* tg3_debug module parameter overrides the default message mask. */
8206 tp->msg_enable = tg3_debug;
8208 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8210 /* The word/byte swap controls here control register access byte
8211 * swapping. DMA data byte swapping is controlled in the GRC_MODE
8214 tp->misc_host_ctrl =
8215 MISC_HOST_CTRL_MASK_PCI_INT |
8216 MISC_HOST_CTRL_WORD_SWAP |
8217 MISC_HOST_CTRL_INDIR_ACCESS |
8218 MISC_HOST_CTRL_PCISTATE_RW;
8220 /* The NONFRM (non-frame) byte/word swap controls take effect
8221 * on descriptor entries, anything which isn't packet data.
8223 * The StrongARM chips on the board (one for tx, one for rx)
8224 * are running in big-endian mode.
8226 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8227 GRC_MODE_WSWAP_NONFRM_DATA);
8229 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8231 spin_lock_init(&tp->lock);
8232 spin_lock_init(&tp->tx_lock);
8233 spin_lock_init(&tp->indirect_lock);
8234 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8236 tp->regs = ioremap(tg3reg_base, tg3reg_len);
8237 if (tp->regs == 0UL) {
8238 printk(KERN_ERR PFX "Cannot map device registers, "
8241 goto err_out_free_dev;
8244 tg3_init_link_config(tp);
8246 tg3_init_bufmgr_config(tp);
8248 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8249 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8250 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up net_device operations (pre-netdev_ops kernel API). */
8252 dev->open = tg3_open;
8253 dev->stop = tg3_close;
8254 dev->get_stats = tg3_get_stats;
8255 dev->set_multicast_list = tg3_set_rx_mode;
8256 dev->set_mac_address = tg3_set_mac_addr;
8257 dev->do_ioctl = tg3_ioctl;
8258 dev->tx_timeout = tg3_tx_timeout;
8259 dev->poll = tg3_poll;
8260 dev->ethtool_ops = &tg3_ethtool_ops;
8262 dev->watchdog_timeo = TG3_TX_TIMEOUT;
8263 dev->change_mtu = tg3_change_mtu;
8264 dev->irq = pdev->irq;
8265 #ifdef CONFIG_NET_POLL_CONTROLLER
8266 dev->poll_controller = tg3_poll_controller;
/* Read chip revision, bus mode, PHY type, and feature flags. */
8269 err = tg3_get_invariants(tp);
8271 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8273 goto err_out_iounmap;
/* 5705/5750 use smaller buffer-manager watermarks. */
8276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8277 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8278 tp->bufmgr_config.mbuf_read_dma_low_water =
8279 DEFAULT_MB_RDMA_LOW_WATER_5705;
8280 tp->bufmgr_config.mbuf_mac_rx_low_water =
8281 DEFAULT_MB_MACRX_LOW_WATER_5705;
8282 tp->bufmgr_config.mbuf_high_water =
8283 DEFAULT_MB_HIGH_WATER_5705;
8286 #if TG3_TSO_SUPPORT != 0
/* TSO firmware is unusable on these revisions / ASF combinations. */
8287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8289 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8290 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8291 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8292 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8294 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8297 /* TSO is off by default, user can enable using ethtool. */
8299 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8300 dev->features |= NETIF_F_TSO;
/* 5705_A1 without TSO on a slow bus is limited to 64 RX pending. */
8305 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8306 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8307 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8308 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8309 tp->rx_pending = 63;
8312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8313 tp->pdev_peer = tg3_find_5704_peer(tp);
8315 err = tg3_get_device_address(tp);
8317 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8319 goto err_out_iounmap;
8323 * Reset chip in case a UNDI or EFI boot driver did not shut down
8324 * cleanly; otherwise the DMA self test will enable WDMAC and we'll
8325 * see (spurious) pending DMA on the PCI bus at that point.
8327 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8328 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8329 pci_save_state(tp->pdev, tp->pci_cfg_state);
8330 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8334 err = tg3_test_dma(tp);
8336 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8337 goto err_out_iounmap;
8340 /* Tigon3 can do ipv4 only... and some chips have buggy
8343 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8344 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8345 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8347 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
/* 5788 cannot do 64-bit DMA to high memory. */
8349 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8350 dev->features &= ~NETIF_F_HIGHDMA;
8352 /* flow control autonegotiation is default behavior */
8353 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8355 err = register_netdev(dev);
8357 printk(KERN_ERR PFX "Cannot register net device, "
8359 goto err_out_iounmap;
8362 pci_set_drvdata(pdev, dev);
8364 /* Now that we have fully setup the chip, save away a snapshot
8365 * of the PCI config space. We need to restore this after
8366 * GRC_MISC_CFG core clock resets and some resume events.
8368 pci_save_state(tp->pdev, tp->pci_cfg_state);
/* Probe banner: part number, revision, PHY, bus type/speed/width. */
8370 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8372 tp->board_part_number,
8373 tp->pci_chip_rev_id,
8375 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8376 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8377 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8378 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8379 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8380 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8382 for (i = 0; i < 6; i++)
8383 printk("%2.2x%c", dev->dev_addr[i],
8384 i == 5 ? '\n' : ':');
8386 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8387 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8390 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8391 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8392 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8393 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8394 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8395 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8396 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
/* Error unwind: release in reverse order of acquisition. */
8407 pci_release_regions(pdev);
8409 err_out_disable_pdev:
8410 pci_disable_device(pdev);
8411 pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: unregister the net_device and release PCI
 * resources acquired in tg3_init_one().
 * NOTE(review): the listing elides the iounmap/free_netdev lines that
 * the full source performs between unregister and release.
 */
8415 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8417 struct net_device *dev = pci_get_drvdata(pdev);
8420 struct tg3 *tp = netdev_priv(dev);
8422 unregister_netdev(dev);
8425 pci_release_regions(pdev);
8426 pci_disable_device(pdev);
8427 pci_set_drvdata(pdev, NULL);
/* PM suspend hook: if the interface is up, stop the timer, disable
 * interrupts, detach the device, halt the chip, and drop to the
 * requested power state.  On failure the elided branch re-starts the
 * interface (timer, attach, netif start) before returning the error.
 * NOTE(review): several lines (halt call, error check, returns) are
 * elided from this listing.
 */
8431 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8433 struct net_device *dev = pci_get_drvdata(pdev);
8434 struct tg3 *tp = netdev_priv(dev);
8437 if (!netif_running(dev))
8442 del_timer_sync(&tp->timer);
/* Lock order throughout: tp->lock (irq) outer, tp->tx_lock inner. */
8444 spin_lock_irq(&tp->lock);
8445 spin_lock(&tp->tx_lock);
8446 tg3_disable_ints(tp);
8447 spin_unlock(&tp->tx_lock);
8448 spin_unlock_irq(&tp->lock);
8450 netif_device_detach(dev);
8452 spin_lock_irq(&tp->lock);
8453 spin_lock(&tp->tx_lock);
8455 spin_unlock(&tp->tx_lock);
8456 spin_unlock_irq(&tp->lock);
8458 err = tg3_set_power_state(tp, state);
/* Failure path (condition elided): bring the interface back up. */
8460 spin_lock_irq(&tp->lock);
8461 spin_lock(&tp->tx_lock);
8465 tp->timer.expires = jiffies + tp->timer_offset;
8466 add_timer(&tp->timer);
8468 netif_device_attach(dev);
8469 tg3_netif_start(tp);
8471 spin_unlock(&tp->tx_lock);
8472 spin_unlock_irq(&tp->lock);
/* PM resume hook: restore PCI config space, return to full power,
 * re-attach the device, re-init the chip (elided), restart the timer,
 * and re-enable interrupts.
 */
8478 static int tg3_resume(struct pci_dev *pdev)
8480 struct net_device *dev = pci_get_drvdata(pdev);
8481 struct tg3 *tp = netdev_priv(dev);
8484 if (!netif_running(dev))
/* Restore the config-space snapshot taken at probe/suspend time. */
8487 pci_restore_state(tp->pdev, tp->pci_cfg_state);
8489 err = tg3_set_power_state(tp, 0);
8493 netif_device_attach(dev);
8495 spin_lock_irq(&tp->lock);
8496 spin_lock(&tp->tx_lock);
8500 tp->timer.expires = jiffies + tp->timer_offset;
8501 add_timer(&tp->timer);
8503 tg3_enable_ints(tp);
8505 tg3_netif_start(tp);
8507 spin_unlock(&tp->tx_lock);
8508 spin_unlock_irq(&tp->lock);
/* PCI driver descriptor binding the tg3_pci_tbl device IDs to the
 * probe/remove and power-management entry points above.
 */
8513 static struct pci_driver tg3_driver = {
8514 .name = DRV_MODULE_NAME,
8515 .id_table = tg3_pci_tbl,
8516 .probe = tg3_init_one,
8517 .remove = __devexit_p(tg3_remove_one),
8518 .suspend = tg3_suspend,
8519 .resume = tg3_resume
/* Module init: register the PCI driver with the core. */
8522 static int __init tg3_init(void)
8524 return pci_module_init(&tg3_driver);
/* Module exit: unregister the PCI driver (triggers remove for all
 * bound devices). */
8527 static void __exit tg3_cleanup(void)
8529 pci_unregister_driver(&tg3_driver);
/* Kernel module entry/exit point registration. */
8532 module_init(tg3_init);
8533 module_exit(tg3_cleanup);