/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 */
9 #include <linux/config.h>
11 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/compiler.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/pci.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/mii.h>
26 #include <linux/if_vlan.h>
28 #include <linux/tcp.h>
29 #include <linux/workqueue.h>
31 #include <net/checksum.h>
33 #include <asm/system.h>
35 #include <asm/byteorder.h>
36 #include <asm/uaccess.h>
39 #include <asm/idprom.h>
40 #include <asm/oplib.h>
/* VLAN tag insertion/stripping is only compiled in when the 802.1Q
 * VLAN layer is available (built-in or as a module).
 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

/* TCP segmentation offload support depends on the netdevice feature flag. */
#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif
/* Driver identity strings used in the version banner and log prefix. */
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.8"
#define DRV_MODULE_RELDATE	"July 14, 2004"

/* Default hardware mode register values programmed at init time. */
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmap when the tg3_debug module parameter is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
/* Jumbo frames (9000) only on chips that are not 5705/5750 class. */
#define TG3_MAX_MTU(tp)	\
	((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
	  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
	 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the DMA-coherent ring allocations. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)

/* Number of TX entries the driver is not allowed to fill. */
#define TX_RING_GAP(TP)	\
	(TG3_TX_RING_SIZE - (TP)->tx_pending)
/* Free TX descriptors, accounting for producer/consumer wrap. */
#define TX_BUFFS_AVAIL(TP)						\
	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: MTU payload plus rx_offset alignment and slack. */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
/* Version banner printed once at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(tg3_debug, "i");
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

/* Module parameter: netif_msg bitmap; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
146 static struct pci_device_id tg3_pci_tbl[] = {
147 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
148 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
149 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225 char string[ETH_GSTRING_LEN];
226 } ethtool_stats_keys[TG3_NUM_STATS] = {
229 { "rx_ucast_packets" },
230 { "rx_mcast_packets" },
231 { "rx_bcast_packets" },
233 { "rx_align_errors" },
234 { "rx_xon_pause_rcvd" },
235 { "rx_xoff_pause_rcvd" },
236 { "rx_mac_ctrl_rcvd" },
237 { "rx_xoff_entered" },
238 { "rx_frame_too_long_errors" },
240 { "rx_undersize_packets" },
241 { "rx_in_length_errors" },
242 { "rx_out_length_errors" },
243 { "rx_64_or_less_octet_packets" },
244 { "rx_65_to_127_octet_packets" },
245 { "rx_128_to_255_octet_packets" },
246 { "rx_256_to_511_octet_packets" },
247 { "rx_512_to_1023_octet_packets" },
248 { "rx_1024_to_1522_octet_packets" },
249 { "rx_1523_to_2047_octet_packets" },
250 { "rx_2048_to_4095_octet_packets" },
251 { "rx_4096_to_8191_octet_packets" },
252 { "rx_8192_to_9022_octet_packets" },
259 { "tx_flow_control" },
261 { "tx_single_collisions" },
262 { "tx_mult_collisions" },
264 { "tx_excessive_collisions" },
265 { "tx_late_collisions" },
266 { "tx_collide_2times" },
267 { "tx_collide_3times" },
268 { "tx_collide_4times" },
269 { "tx_collide_5times" },
270 { "tx_collide_6times" },
271 { "tx_collide_7times" },
272 { "tx_collide_8times" },
273 { "tx_collide_9times" },
274 { "tx_collide_10times" },
275 { "tx_collide_11times" },
276 { "tx_collide_12times" },
277 { "tx_collide_13times" },
278 { "tx_collide_14times" },
279 { "tx_collide_15times" },
280 { "tx_ucast_packets" },
281 { "tx_mcast_packets" },
282 { "tx_bcast_packets" },
283 { "tx_carrier_sense_errors" },
287 { "dma_writeq_full" },
288 { "dma_write_prioq_full" },
292 { "rx_threshold_hit" },
294 { "dma_readq_full" },
295 { "dma_read_prioq_full" },
296 { "tx_comp_queue_full" },
298 { "ring_set_send_prod_index" },
299 { "ring_status_update" },
301 { "nic_avoided_irqs" },
302 { "nic_tx_threshold_hit" }
305 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
307 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
310 spin_lock_irqsave(&tp->indirect_lock, flags);
311 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
312 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
313 spin_unlock_irqrestore(&tp->indirect_lock, flags);
315 writel(val, tp->regs + off);
316 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
317 readl(tp->regs + off);
321 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
323 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
326 spin_lock_irqsave(&tp->indirect_lock, flags);
327 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
328 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
329 spin_unlock_irqrestore(&tp->indirect_lock, flags);
331 unsigned long dest = tp->regs + off;
333 readl(dest); /* always flush PCI write */
337 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
339 unsigned long mbox = tp->regs + off;
341 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
345 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
347 unsigned long mbox = tp->regs + off;
349 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
351 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Register access shorthand.  All of these expect a local variable
 * named 'tp' (struct tg3 *) to be in scope at the call site.
 * tw32_f additionally flushes the posted write with a read-back.
 */
#define tw32_mailbox(reg, val)	writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)	_tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	_tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)		tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val))
#define tw16(reg,val)		writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)		writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)		readl(tp->regs + (reg))
#define tr16(reg)		readw(tp->regs + (reg))
#define tr8(reg)		readb(tp->regs + (reg))
367 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
371 spin_lock_irqsave(&tp->indirect_lock, flags);
372 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
373 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
375 /* Always leave this as zero. */
376 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
377 spin_unlock_irqrestore(&tp->indirect_lock, flags);
380 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
384 spin_lock_irqsave(&tp->indirect_lock, flags);
385 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
386 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
388 /* Always leave this as zero. */
389 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
390 spin_unlock_irqrestore(&tp->indirect_lock, flags);
393 static void tg3_disable_ints(struct tg3 *tp)
395 tw32(TG3PCI_MISC_HOST_CTRL,
396 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
397 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
398 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
401 static inline void tg3_cond_int(struct tg3 *tp)
403 if (tp->hw_status->status & SD_STATUS_UPDATED)
404 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
407 static void tg3_enable_ints(struct tg3 *tp)
409 tw32(TG3PCI_MISC_HOST_CTRL,
410 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
411 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
412 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
417 static inline void tg3_netif_stop(struct tg3 *tp)
419 netif_poll_disable(tp->dev);
420 netif_tx_disable(tp->dev);
423 static inline void tg3_netif_start(struct tg3 *tp)
425 netif_wake_queue(tp->dev);
426 /* NOTE: unconditional netif_wake_queue is only appropriate
427 * so long as all callers are assured to have free tx slots
428 * (such as after tg3_init_hw)
430 netif_poll_enable(tp->dev);
434 static void tg3_switch_clocks(struct tg3 *tp)
436 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
439 orig_clock_ctrl = clock_ctrl;
440 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
441 CLOCK_CTRL_CLKRUN_OENABLE |
443 tp->pci_clock_ctrl = clock_ctrl;
445 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
447 (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
448 tw32_f(TG3PCI_CLOCK_CTRL,
450 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
452 tw32_f(TG3PCI_CLOCK_CTRL,
453 clock_ctrl | (CLOCK_CTRL_ALTCLK));
456 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
460 #define PHY_BUSY_LOOPS 5000
462 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
467 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
469 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
475 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
476 MI_COM_PHY_ADDR_MASK);
477 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
478 MI_COM_REG_ADDR_MASK);
479 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
481 tw32_f(MAC_MI_COM, frame_val);
483 loops = PHY_BUSY_LOOPS;
484 while (loops-- > 0) {
486 frame_val = tr32(MAC_MI_COM);
488 if ((frame_val & MI_COM_BUSY) == 0) {
490 frame_val = tr32(MAC_MI_COM);
497 *val = frame_val & MI_COM_DATA_MASK;
501 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
502 tw32_f(MAC_MI_MODE, tp->mi_mode);
509 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
514 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
516 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
520 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
521 MI_COM_PHY_ADDR_MASK);
522 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
523 MI_COM_REG_ADDR_MASK);
524 frame_val |= (val & MI_COM_DATA_MASK);
525 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
527 tw32_f(MAC_MI_COM, frame_val);
529 loops = PHY_BUSY_LOOPS;
530 while (loops-- > 0) {
532 frame_val = tr32(MAC_MI_COM);
533 if ((frame_val & MI_COM_BUSY) == 0) {
535 frame_val = tr32(MAC_MI_COM);
544 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
545 tw32_f(MAC_MI_MODE, tp->mi_mode);
552 static void tg3_phy_set_wirespeed(struct tg3 *tp)
556 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
559 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
560 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
561 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
564 static int tg3_bmcr_reset(struct tg3 *tp)
569 /* OK, reset it, and poll the BMCR_RESET bit until it
570 * clears or we time out.
572 phy_control = BMCR_RESET;
573 err = tg3_writephy(tp, MII_BMCR, phy_control);
579 err = tg3_readphy(tp, MII_BMCR, &phy_control);
583 if ((phy_control & BMCR_RESET) == 0) {
595 static int tg3_wait_macro_done(struct tg3 *tp)
602 tg3_readphy(tp, 0x16, &tmp32);
603 if ((tmp32 & 0x1000) == 0)
612 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
614 static const u32 test_pat[4][6] = {
615 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
616 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
617 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
618 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
622 for (chan = 0; chan < 4; chan++) {
625 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
626 (chan * 0x2000) | 0x0200);
627 tg3_writephy(tp, 0x16, 0x0002);
629 for (i = 0; i < 6; i++)
630 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
633 tg3_writephy(tp, 0x16, 0x0202);
634 if (tg3_wait_macro_done(tp)) {
639 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
640 (chan * 0x2000) | 0x0200);
641 tg3_writephy(tp, 0x16, 0x0082);
642 if (tg3_wait_macro_done(tp)) {
647 tg3_writephy(tp, 0x16, 0x0802);
648 if (tg3_wait_macro_done(tp)) {
653 for (i = 0; i < 6; i += 2) {
656 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
657 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
658 if (tg3_wait_macro_done(tp)) {
664 if (low != test_pat[chan][i] ||
665 high != test_pat[chan][i+1]) {
666 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
668 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
678 static int tg3_phy_reset_chanpat(struct tg3 *tp)
682 for (chan = 0; chan < 4; chan++) {
685 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
686 (chan * 0x2000) | 0x0200);
687 tg3_writephy(tp, 0x16, 0x0002);
688 for (i = 0; i < 6; i++)
689 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
690 tg3_writephy(tp, 0x16, 0x0202);
691 if (tg3_wait_macro_done(tp))
698 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
700 u32 reg32, phy9_orig;
701 int retries, do_phy_reset, err;
707 err = tg3_bmcr_reset(tp);
713 /* Disable transmitter and interrupt. */
714 tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
716 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
718 /* Set full-duplex, 1000 mbps. */
719 tg3_writephy(tp, MII_BMCR,
720 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
722 /* Set to master mode. */
723 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
724 tg3_writephy(tp, MII_TG3_CTRL,
725 (MII_TG3_CTRL_AS_MASTER |
726 MII_TG3_CTRL_ENABLE_AS_MASTER));
728 /* Enable SM_DSP_CLOCK and 6dB. */
729 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
731 /* Block the PHY control access. */
732 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
733 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
735 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
740 err = tg3_phy_reset_chanpat(tp);
744 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
745 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
747 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
748 tg3_writephy(tp, 0x16, 0x0000);
750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
752 /* Set Extended packet length bit for jumbo frames */
753 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
756 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
759 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
761 tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
763 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
768 /* This will reset the tigon3 PHY if there is no valid
769 * link unless the FORCE argument is non-zero.
771 static int tg3_phy_reset(struct tg3 *tp)
776 err = tg3_readphy(tp, MII_BMSR, &phy_status);
777 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
784 err = tg3_phy_reset_5703_4_5(tp);
790 err = tg3_bmcr_reset(tp);
795 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
796 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
797 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
798 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
799 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
800 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
801 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
803 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
804 tg3_writephy(tp, 0x1c, 0x8d68);
805 tg3_writephy(tp, 0x1c, 0x8d68);
807 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
808 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
810 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
811 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
812 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
813 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
814 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
815 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
817 /* Set Extended packet length bit (bit 14) on all chips that */
818 /* support jumbo frames */
819 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
820 /* Cannot do read-modify-write on 5401 */
821 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
822 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
823 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
826 /* Set bit 14 with read-modify-write to preserve other bits */
827 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
828 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
829 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
831 tg3_phy_set_wirespeed(tp);
835 static void tg3_frob_aux_power(struct tg3 *tp)
837 struct tg3 *tp_peer = tp;
839 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
843 tp_peer = pci_get_drvdata(tp->pdev_peer);
849 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
850 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
853 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
854 (GRC_LCLCTRL_GPIO_OE0 |
855 GRC_LCLCTRL_GPIO_OE1 |
856 GRC_LCLCTRL_GPIO_OE2 |
857 GRC_LCLCTRL_GPIO_OUTPUT0 |
858 GRC_LCLCTRL_GPIO_OUTPUT1));
862 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
865 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
866 (GRC_LCLCTRL_GPIO_OE0 |
867 GRC_LCLCTRL_GPIO_OE1 |
868 GRC_LCLCTRL_GPIO_OE2 |
869 GRC_LCLCTRL_GPIO_OUTPUT1 |
870 GRC_LCLCTRL_GPIO_OUTPUT2));
873 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
874 (GRC_LCLCTRL_GPIO_OE0 |
875 GRC_LCLCTRL_GPIO_OE1 |
876 GRC_LCLCTRL_GPIO_OE2 |
877 GRC_LCLCTRL_GPIO_OUTPUT0 |
878 GRC_LCLCTRL_GPIO_OUTPUT1 |
879 GRC_LCLCTRL_GPIO_OUTPUT2));
882 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
883 (GRC_LCLCTRL_GPIO_OE0 |
884 GRC_LCLCTRL_GPIO_OE1 |
885 GRC_LCLCTRL_GPIO_OE2 |
886 GRC_LCLCTRL_GPIO_OUTPUT0 |
887 GRC_LCLCTRL_GPIO_OUTPUT1));
891 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
892 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
894 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
897 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
898 (GRC_LCLCTRL_GPIO_OE1 |
899 GRC_LCLCTRL_GPIO_OUTPUT1));
902 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
903 (GRC_LCLCTRL_GPIO_OE1));
906 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907 (GRC_LCLCTRL_GPIO_OE1 |
908 GRC_LCLCTRL_GPIO_OUTPUT1));
/* Forward declaration: PHY/link setup, defined later in the file. */
static int tg3_setup_phy(struct tg3 *, int);

/* Reset-reason codes passed to tg3_write_sig_post_reset(). */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
/* Transition the chip to a new PCI power state, arming wake-on-LAN
 * and trimming MAC/core clocks on the way down.
 * NOTE(review): many interior lines of this function (the switch on
 * 'state', several declarations and closing braces) are not visible
 * in this copy of the file; the code below is kept byte-identical and
 * only annotated.  Reconcile against the full source before building.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
	u16 power_control, power_caps;
	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
	pci_read_config_word(tp->pdev,
	/* Acknowledge any pending PME event and clear the state field. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	pci_write_config_word(tp->pdev,
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
	       tp->dev->name, state);
	power_control |= PCI_PM_CTRL_PME_ENABLE;
	/* Mask PCI interrupts while powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
	/* Save the current link config so resume can restore it. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	/* Copper PHYs drop to 10/half autoneg to save power. */
	if (tp->phy_id != PHY_ID_SERDES) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		if (tp->phy_id != PHY_ID_SERDES) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
		mac_mode = MAC_MODE_PORT_MODE_MII;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
		    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		mac_mode = MAC_MODE_PORT_MODE_TBI;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
		tw32(MAC_LED_CTRL, tp->led_ctrl);
	/* Magic-packet wake needs PME-from-D3cold capability. */
	if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
	     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
	tw32_f(MAC_MODE, mac_mode);
	tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);
		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
		       CLOCK_CTRL_PWRDOWN_PLL133);
	/* NOTE(review): literal 5750 compared against GET_ASIC_REV()
	 * instead of ASIC_REV_5750 — looks like a bug; confirm.
	 */
	} else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == 5750) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
			   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			tw32_f(TG3PCI_CLOCK_CTRL,
			       tp->pci_clock_ctrl | newbits3);
	tg3_frob_aux_power(tp);
	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1089 static void tg3_link_report(struct tg3 *tp)
1091 if (!netif_carrier_ok(tp->dev)) {
1092 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1094 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1096 (tp->link_config.active_speed == SPEED_1000 ?
1098 (tp->link_config.active_speed == SPEED_100 ?
1100 (tp->link_config.active_duplex == DUPLEX_FULL ?
1103 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1106 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1107 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1111 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1113 u32 new_tg3_flags = 0;
1114 u32 old_rx_mode = tp->rx_mode;
1115 u32 old_tx_mode = tp->tx_mode;
1117 if (local_adv & ADVERTISE_PAUSE_CAP) {
1118 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1119 if (remote_adv & LPA_PAUSE_CAP)
1121 (TG3_FLAG_RX_PAUSE |
1123 else if (remote_adv & LPA_PAUSE_ASYM)
1125 (TG3_FLAG_RX_PAUSE);
1127 if (remote_adv & LPA_PAUSE_CAP)
1129 (TG3_FLAG_RX_PAUSE |
1132 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1133 if ((remote_adv & LPA_PAUSE_CAP) &&
1134 (remote_adv & LPA_PAUSE_ASYM))
1135 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1138 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1139 tp->tg3_flags |= new_tg3_flags;
1141 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1142 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1144 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1146 if (old_rx_mode != tp->rx_mode) {
1147 tw32_f(MAC_RX_MODE, tp->rx_mode);
1150 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1151 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1153 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1155 if (old_tx_mode != tp->tx_mode) {
1156 tw32_f(MAC_TX_MODE, tp->tx_mode);
1160 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1162 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1163 case MII_TG3_AUX_STAT_10HALF:
1165 *duplex = DUPLEX_HALF;
1168 case MII_TG3_AUX_STAT_10FULL:
1170 *duplex = DUPLEX_FULL;
1173 case MII_TG3_AUX_STAT_100HALF:
1175 *duplex = DUPLEX_HALF;
1178 case MII_TG3_AUX_STAT_100FULL:
1180 *duplex = DUPLEX_FULL;
1183 case MII_TG3_AUX_STAT_1000HALF:
1184 *speed = SPEED_1000;
1185 *duplex = DUPLEX_HALF;
1188 case MII_TG3_AUX_STAT_1000FULL:
1189 *speed = SPEED_1000;
1190 *duplex = DUPLEX_FULL;
1194 *speed = SPEED_INVALID;
1195 *duplex = DUPLEX_INVALID;
1200 static int tg3_phy_copper_begin(struct tg3 *tp)
1205 if (tp->link_config.phy_is_low_power) {
1206 /* Entering low power mode. Disable gigabit and
1207 * 100baseT advertisements.
1209 tg3_writephy(tp, MII_TG3_CTRL, 0);
1211 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1212 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1213 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1214 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1216 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1217 } else if (tp->link_config.speed == SPEED_INVALID) {
1218 tp->link_config.advertising =
1219 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1220 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1221 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1222 ADVERTISED_Autoneg | ADVERTISED_MII);
1224 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1225 tp->link_config.advertising &=
1226 ~(ADVERTISED_1000baseT_Half |
1227 ADVERTISED_1000baseT_Full);
1229 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1230 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1231 new_adv |= ADVERTISE_10HALF;
1232 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1233 new_adv |= ADVERTISE_10FULL;
1234 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1235 new_adv |= ADVERTISE_100HALF;
1236 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1237 new_adv |= ADVERTISE_100FULL;
1238 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1240 if (tp->link_config.advertising &
1241 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1243 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1244 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1245 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1246 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1247 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1248 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1249 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1250 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1251 MII_TG3_CTRL_ENABLE_AS_MASTER);
1252 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1254 tg3_writephy(tp, MII_TG3_CTRL, 0);
1257 /* Asking for a specific link mode. */
1258 if (tp->link_config.speed == SPEED_1000) {
1259 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1260 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1262 if (tp->link_config.duplex == DUPLEX_FULL)
1263 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1265 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1266 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1267 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1268 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1269 MII_TG3_CTRL_ENABLE_AS_MASTER);
1270 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1272 tg3_writephy(tp, MII_TG3_CTRL, 0);
1274 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1275 if (tp->link_config.speed == SPEED_100) {
1276 if (tp->link_config.duplex == DUPLEX_FULL)
1277 new_adv |= ADVERTISE_100FULL;
1279 new_adv |= ADVERTISE_100HALF;
1281 if (tp->link_config.duplex == DUPLEX_FULL)
1282 new_adv |= ADVERTISE_10FULL;
1284 new_adv |= ADVERTISE_10HALF;
1286 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1290 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1291 tp->link_config.speed != SPEED_INVALID) {
1292 u32 bmcr, orig_bmcr;
1294 tp->link_config.active_speed = tp->link_config.speed;
1295 tp->link_config.active_duplex = tp->link_config.duplex;
1298 switch (tp->link_config.speed) {
1304 bmcr |= BMCR_SPEED100;
1308 bmcr |= TG3_BMCR_SPEED1000;
1312 if (tp->link_config.duplex == DUPLEX_FULL)
1313 bmcr |= BMCR_FULLDPLX;
1315 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1316 if (bmcr != orig_bmcr) {
1317 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1318 for (i = 0; i < 1500; i++) {
1322 tg3_readphy(tp, MII_BMSR, &tmp);
1323 tg3_readphy(tp, MII_BMSR, &tmp);
1324 if (!(tmp & BMSR_LSTATUS)) {
1329 tg3_writephy(tp, MII_BMCR, bmcr);
1333 tg3_writephy(tp, MII_BMCR,
1334 BMCR_ANENABLE | BMCR_ANRESTART);
/* Program workaround coefficients into the BCM5401 PHY's DSP.
 * Each tap is written as an address/value pair through the
 * MII_TG3_DSP_ADDRESS / MII_TG3_DSP_RW_PORT register window; the
 * individual write results are OR-ed into `err` so a single failed
 * MII write makes the accumulated result non-zero.
 * NOTE(review): this is a sampled extract -- the `int err;`
 * declaration, trailing return and closing brace are elided here;
 * code lines below are byte-identical to the source as given.
 */
1340 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1344 /* Turn off tap power management. */
1345 /* Set Extended packet length bit */
1346 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1348 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1349 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1351 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1352 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1354 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1355 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1357 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1358 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1360 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1361 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* Report whether the copper PHY is currently advertising every
 * speed/duplex mode we could offer: all four 10/100 modes in
 * MII_ADVERTISE, plus both 1000 modes in MII_TG3_CTRL unless the
 * board is flagged TG3_FLAG_10_100_ONLY.
 * NOTE(review): sampled extract -- the `return` statements, the
 * `u32 tg3_ctrl;` declaration and closing braces are elided.
 */
1368 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1370 u32 adv_reg, all_mask;
1372 tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1373 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1374 ADVERTISE_100HALF | ADVERTISE_100FULL);
/* Any missing 10/100 bit means we are not advertising everything. */
1375 if ((adv_reg & all_mask) != all_mask)
1377 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1380 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1381 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1382 MII_TG3_CTRL_ADV_1000_FULL);
1383 if ((tg3_ctrl & all_mask) != all_mask)
/* Bring up / re-verify the link on a copper (MII/GMII) PHY and
 * program the MAC to match the negotiated speed, duplex, and flow
 * control.  Called with the device lock held (presumably -- the
 * locking context is not visible in this extract; confirm against
 * callers).
 * NOTE(review): sampled extract -- many lines (declarations of
 * bmsr/bmcr/i/err/dummy/aux_stat/tmp/current_speed/current_duplex,
 * udelay()s, several else/closing braces, the final return) are
 * elided.  Code lines below are byte-identical to the source.
 */
1389 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1391 int current_link_up;
/* (Elided tw32_f of MAC_STATUS) clears stale sync/config/MI/link
 * change status bits before we start poking the PHY.
 */
1400 (MAC_STATUS_SYNC_CHANGED |
1401 MAC_STATUS_CFG_CHANGED |
1402 MAC_STATUS_MI_COMPLETION |
1403 MAC_STATUS_LNKSTATE_CHANGED));
1406 tp->mi_mode = MAC_MI_MODE_BASE;
1407 tw32_f(MAC_MI_MODE, tp->mi_mode);
1410 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1412 /* Some third-party PHYs need to be reset on link going
1415 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1418 netif_carrier_ok(tp->dev)) {
/* BMSR is read twice throughout this function: link status is
 * latched-low per the MII spec, so the second read gives the
 * current state.
 */
1419 tg3_readphy(tp, MII_BMSR, &bmsr);
1420 tg3_readphy(tp, MII_BMSR, &bmsr);
1421 if (!(bmsr & BMSR_LSTATUS))
1427 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1428 tg3_readphy(tp, MII_BMSR, &bmsr);
1429 tg3_readphy(tp, MII_BMSR, &bmsr);
1431 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1434 if (!(bmsr & BMSR_LSTATUS)) {
/* BCM5401 needs its DSP workaround table reloaded when link is
 * down; then poll (up to 1000 iterations) for link to return.
 */
1435 err = tg3_init_5401phy_dsp(tp);
1439 tg3_readphy(tp, MII_BMSR, &bmsr);
1440 for (i = 0; i < 1000; i++) {
1442 tg3_readphy(tp, MII_BMSR, &bmsr);
1443 if (bmsr & BMSR_LSTATUS) {
1449 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1450 !(bmsr & BMSR_LSTATUS) &&
1451 tp->link_config.active_speed == SPEED_1000) {
1452 err = tg3_phy_reset(tp);
1454 err = tg3_init_5401phy_dsp(tp);
1459 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1460 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1461 /* 5701 {A0,B0} CRC bug workaround */
1462 tg3_writephy(tp, 0x15, 0x0a75);
1463 tg3_writephy(tp, 0x1c, 0x8c68);
1464 tg3_writephy(tp, 0x1c, 0x8d68);
1465 tg3_writephy(tp, 0x1c, 0x8c68);
1468 /* Clear pending interrupts... */
1469 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1470 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
/* Unmask only the link-change interrupt when using MI interrupts,
 * otherwise mask everything (~0).
 */
1472 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1473 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
1475 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1479 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1480 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1481 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1483 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1486 current_link_up = 0;
1487 current_speed = SPEED_INVALID;
1488 current_duplex = DUPLEX_INVALID;
/* Poll for link (up to 100 iterations), then read the negotiated
 * speed/duplex out of the Broadcom AUX status shadow register.
 */
1491 for (i = 0; i < 100; i++) {
1492 tg3_readphy(tp, MII_BMSR, &bmsr);
1493 tg3_readphy(tp, MII_BMSR, &bmsr);
1494 if (bmsr & BMSR_LSTATUS)
1499 if (bmsr & BMSR_LSTATUS) {
1502 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1503 for (i = 0; i < 2000; i++) {
1505 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1510 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* BMCR can transiently read 0 or 0x7fff while the PHY settles;
 * retry up to 200 times until a sane value appears.
 */
1515 for (i = 0; i < 200; i++) {
1516 tg3_readphy(tp, MII_BMCR, &bmcr);
1517 tg3_readphy(tp, MII_BMCR, &bmcr);
1518 if (bmcr && bmcr != 0x7fff)
1523 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1524 if (bmcr & BMCR_ANENABLE) {
1525 current_link_up = 1;
1527 /* Force autoneg restart if we are exiting
1530 if (!tg3_copper_is_advertising_all(tp))
1531 current_link_up = 0;
1533 current_link_up = 0;
/* Forced-mode case: link only counts as up if the PHY's forced
 * speed/duplex match what was requested.
 */
1536 if (!(bmcr & BMCR_ANENABLE) &&
1537 tp->link_config.speed == current_speed &&
1538 tp->link_config.duplex == current_duplex) {
1539 current_link_up = 1;
1541 current_link_up = 0;
1545 tp->link_config.active_speed = current_speed;
1546 tp->link_config.active_duplex = current_duplex;
/* Full-duplex autoneg link: validate pause advertisement and
 * program flow control from the local/remote pause bits.
 */
1549 if (current_link_up == 1 &&
1550 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1551 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1552 u32 local_adv, remote_adv;
1554 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1555 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1557 tg3_readphy(tp, MII_LPA, &remote_adv);
1558 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1560 /* If we are not advertising full pause capability,
1561 * something is wrong. Bring the link down and reconfigure.
1563 if (local_adv != ADVERTISE_PAUSE_CAP) {
1564 current_link_up = 0;
1566 tg3_setup_flow_control(tp, local_adv, remote_adv);
/* No link: restart the copper autoneg/forced-mode setup and give
 * the PHY one more chance to report link before giving up.
 */
1570 if (current_link_up == 0) {
1573 tg3_phy_copper_begin(tp);
1575 tg3_readphy(tp, MII_BMSR, &tmp);
1576 tg3_readphy(tp, MII_BMSR, &tmp);
1577 if (tmp & BMSR_LSTATUS)
1578 current_link_up = 1;
/* Program the MAC port mode (MII for 10/100, GMII otherwise) and
 * half-duplex bit to match the active link parameters.
 */
1581 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1582 if (current_link_up == 1) {
1583 if (tp->link_config.active_speed == SPEED_100 ||
1584 tp->link_config.active_speed == SPEED_10)
1585 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1587 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1589 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1591 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1592 if (tp->link_config.active_duplex == DUPLEX_HALF)
1593 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1595 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1597 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1598 (current_link_up == 1 &&
1599 tp->link_config.active_speed == SPEED_10))
1600 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1602 if (current_link_up == 1)
1603 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1606 /* ??? Without this setting Netgear GA302T PHY does not
1607 * ??? send/receive packets...
1609 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1610 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1611 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1612 tw32_f(MAC_MI_MODE, tp->mi_mode);
1616 tw32_f(MAC_MODE, tp->mac_mode);
1619 if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
1620 /* Polled via timer. */
1621 tw32_f(MAC_EVENT, 0);
1623 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 @ gigabit on a fast PCI/PCI-X bus: (elided code) notifies
 * the NIC firmware via its SRAM mailbox after clearing
 * sync/config-changed status.
 */
1627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1628 current_link_up == 1 &&
1629 tp->link_config.active_speed == SPEED_1000 &&
1630 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1631 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1634 (MAC_STATUS_SYNC_CHANGED |
1635 MAC_STATUS_CFG_CHANGED));
1638 NIC_SRAM_FIRMWARE_MBOX,
1639 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
/* Propagate link state to the net stack and log the transition. */
1642 if (current_link_up != netif_carrier_ok(tp->dev)) {
1643 if (current_link_up)
1644 netif_carrier_on(tp->dev);
1646 netif_carrier_off(tp->dev);
1647 tg3_link_report(tp);
/* State carried by the software 1000BASE-X autonegotiation state
 * machine (tg3_fiber_aneg_smachine) used when the hardware cannot
 * autonegotiate the fiber link itself.  The ANEG_STATE_* values
 * are the machine's states; the MR_* bits mirror the IEEE 802.3
 * clause 37 management-register semantics (MR = "management
 * register"), recording our mode plus the link partner's
 * advertised abilities.
 * NOTE(review): sampled extract -- some member declarations and
 * the struct's closing brace are elided.
 */
1653 struct tg3_fiber_aneginfo {
1655 #define ANEG_STATE_UNKNOWN 0
1656 #define ANEG_STATE_AN_ENABLE 1
1657 #define ANEG_STATE_RESTART_INIT 2
1658 #define ANEG_STATE_RESTART 3
1659 #define ANEG_STATE_DISABLE_LINK_OK 4
1660 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1661 #define ANEG_STATE_ABILITY_DETECT 6
1662 #define ANEG_STATE_ACK_DETECT_INIT 7
1663 #define ANEG_STATE_ACK_DETECT 8
1664 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1665 #define ANEG_STATE_COMPLETE_ACK 10
1666 #define ANEG_STATE_IDLE_DETECT_INIT 11
1667 #define ANEG_STATE_IDLE_DETECT 12
1668 #define ANEG_STATE_LINK_OK 13
1669 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1670 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Flag bits: our autoneg mode/control plus link-partner (LP)
 * ability bits decoded from the received config words.
 */
1673 #define MR_AN_ENABLE 0x00000001
1674 #define MR_RESTART_AN 0x00000002
1675 #define MR_AN_COMPLETE 0x00000004
1676 #define MR_PAGE_RX 0x00000008
1677 #define MR_NP_LOADED 0x00000010
1678 #define MR_TOGGLE_TX 0x00000020
1679 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1680 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1681 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1682 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1683 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1684 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1685 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1686 #define MR_TOGGLE_RX 0x00002000
1687 #define MR_NP_RX 0x00004000
1689 #define MR_LINK_OK 0x80000000
/* Timestamps for settle-time measurement (units: state-machine
 * ticks; see ANEG_STATE_SETTLE_TIME below).
 */
1691 unsigned long link_time, cur_time;
/* Debounce of the received config word: ability_match is set once
 * the same non-zero config has been seen more than once.
 */
1693 u32 ability_match_cfg;
1694 int ability_match_count;
1696 char ability_match, idle_match, ack_match;
/* Raw config words last sent (tx) and received (rx); ANEG_CFG_*
 * bits below decode them.
 */
1698 u32 txconfig, rxconfig;
1699 #define ANEG_CFG_NP 0x00000080
1700 #define ANEG_CFG_ACK 0x00000040
1701 #define ANEG_CFG_RF2 0x00000020
1702 #define ANEG_CFG_RF1 0x00000010
1703 #define ANEG_CFG_PS2 0x00000001
1704 #define ANEG_CFG_PS1 0x00008000
1705 #define ANEG_CFG_HD 0x00004000
1706 #define ANEG_CFG_FD 0x00002000
1707 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes of the state machine: keep ticking vs. failed.
 * (ANEG_DONE is used by callers but its #define is elided here.)
 */
1712 #define ANEG_TIMER_ENAB 2
1713 #define ANEG_FAILED -1
1715 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software fiber autonegotiation state machine.
 * Reads the received config word from the MAC, debounces it into
 * ap->ability_match / ack_match, then advances ap->state.  Returns
 * ANEG_TIMER_ENAB to request another tick, ANEG_FAILED on error
 * (other return values, e.g. ANEG_DONE, are set on lines elided
 * from this extract).  Caller: fiber_autoneg(), which loops this
 * up to 195000 times.
 * NOTE(review): sampled extract -- declarations (ret, rx_cfg_reg),
 * `break`s, `switch (ap->state)` opener, default case and closing
 * braces are elided.  Code lines are byte-identical to the source.
 */
1717 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1718 struct tg3_fiber_aneginfo *ap)
1720 unsigned long delta;
1724 if (ap->state == ANEG_STATE_UNKNOWN) {
1728 ap->ability_match_cfg = 0;
1729 ap->ability_match_count = 0;
1730 ap->ability_match = 0;
/* Sample the link partner's config word; require the same
 * non-zero word twice in a row before trusting it
 * (ability_match), and note its ACK bit (ack_match, elided).
 */
1736 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1737 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1739 if (rx_cfg_reg != ap->ability_match_cfg) {
1740 ap->ability_match_cfg = rx_cfg_reg;
1741 ap->ability_match = 0;
1742 ap->ability_match_count = 0;
1744 if (++ap->ability_match_count > 1) {
1745 ap->ability_match = 1;
1746 ap->ability_match_cfg = rx_cfg_reg;
1749 if (rx_cfg_reg & ANEG_CFG_ACK)
1757 ap->ability_match_cfg = 0;
1758 ap->ability_match_count = 0;
1759 ap->ability_match = 0;
1765 ap->rxconfig = rx_cfg_reg;
1769 case ANEG_STATE_UNKNOWN:
1770 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1771 ap->state = ANEG_STATE_AN_ENABLE;
1774 case ANEG_STATE_AN_ENABLE:
1775 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1776 if (ap->flags & MR_AN_ENABLE) {
1779 ap->ability_match_cfg = 0;
1780 ap->ability_match_count = 0;
1781 ap->ability_match = 0;
1785 ap->state = ANEG_STATE_RESTART_INIT;
1787 ap->state = ANEG_STATE_DISABLE_LINK_OK;
/* Restart: transmit an all-zero config word for a settle period
 * before starting ability detection.
 */
1791 case ANEG_STATE_RESTART_INIT:
1792 ap->link_time = ap->cur_time;
1793 ap->flags &= ~(MR_NP_LOADED);
1795 tw32(MAC_TX_AUTO_NEG, 0);
1796 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1797 tw32_f(MAC_MODE, tp->mac_mode);
1800 ret = ANEG_TIMER_ENAB;
1801 ap->state = ANEG_STATE_RESTART;
1804 case ANEG_STATE_RESTART:
1805 delta = ap->cur_time - ap->link_time;
1806 if (delta > ANEG_STATE_SETTLE_TIME) {
1807 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1809 ret = ANEG_TIMER_ENAB;
1813 case ANEG_STATE_DISABLE_LINK_OK:
/* Advertise our abilities: full duplex + symmetric pause. */
1817 case ANEG_STATE_ABILITY_DETECT_INIT:
1818 ap->flags &= ~(MR_TOGGLE_TX);
1819 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1820 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1821 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1822 tw32_f(MAC_MODE, tp->mac_mode);
1825 ap->state = ANEG_STATE_ABILITY_DETECT;
1828 case ANEG_STATE_ABILITY_DETECT:
1829 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1830 ap->state = ANEG_STATE_ACK_DETECT_INIT;
/* Echo the partner's config back with ACK set. */
1834 case ANEG_STATE_ACK_DETECT_INIT:
1835 ap->txconfig |= ANEG_CFG_ACK;
1836 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1837 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1838 tw32_f(MAC_MODE, tp->mac_mode);
1841 ap->state = ANEG_STATE_ACK_DETECT;
1844 case ANEG_STATE_ACK_DETECT:
1845 if (ap->ack_match != 0) {
1846 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1847 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1848 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1850 ap->state = ANEG_STATE_AN_ENABLE;
/* Partner stopped sending config: restart negotiation. */
1852 } else if (ap->ability_match != 0 &&
1853 ap->rxconfig == 0) {
1854 ap->state = ANEG_STATE_AN_ENABLE;
/* Decode the acknowledged config word into MR_LP_ADV_* flags. */
1858 case ANEG_STATE_COMPLETE_ACK_INIT:
1859 if (ap->rxconfig & ANEG_CFG_INVAL) {
1863 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1864 MR_LP_ADV_HALF_DUPLEX |
1865 MR_LP_ADV_SYM_PAUSE |
1866 MR_LP_ADV_ASYM_PAUSE |
1867 MR_LP_ADV_REMOTE_FAULT1 |
1868 MR_LP_ADV_REMOTE_FAULT2 |
1869 MR_LP_ADV_NEXT_PAGE |
1872 if (ap->rxconfig & ANEG_CFG_FD)
1873 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1874 if (ap->rxconfig & ANEG_CFG_HD)
1875 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1876 if (ap->rxconfig & ANEG_CFG_PS1)
1877 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1878 if (ap->rxconfig & ANEG_CFG_PS2)
1879 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1880 if (ap->rxconfig & ANEG_CFG_RF1)
1881 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1882 if (ap->rxconfig & ANEG_CFG_RF2)
1883 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1884 if (ap->rxconfig & ANEG_CFG_NP)
1885 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1887 ap->link_time = ap->cur_time;
1889 ap->flags ^= (MR_TOGGLE_TX);
1890 if (ap->rxconfig & 0x0008)
1891 ap->flags |= MR_TOGGLE_RX;
1892 if (ap->rxconfig & ANEG_CFG_NP)
1893 ap->flags |= MR_NP_RX;
1894 ap->flags |= MR_PAGE_RX;
1896 ap->state = ANEG_STATE_COMPLETE_ACK;
1897 ret = ANEG_TIMER_ENAB;
1900 case ANEG_STATE_COMPLETE_ACK:
1901 if (ap->ability_match != 0 &&
1902 ap->rxconfig == 0) {
1903 ap->state = ANEG_STATE_AN_ENABLE;
1906 delta = ap->cur_time - ap->link_time;
1907 if (delta > ANEG_STATE_SETTLE_TIME) {
1908 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1909 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1911 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1912 !(ap->flags & MR_NP_RX)) {
1913 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
/* Stop sending configs and wait for idle on the wire. */
1921 case ANEG_STATE_IDLE_DETECT_INIT:
1922 ap->link_time = ap->cur_time;
1923 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1924 tw32_f(MAC_MODE, tp->mac_mode);
1927 ap->state = ANEG_STATE_IDLE_DETECT;
1928 ret = ANEG_TIMER_ENAB;
1931 case ANEG_STATE_IDLE_DETECT:
1932 if (ap->ability_match != 0 &&
1933 ap->rxconfig == 0) {
1934 ap->state = ANEG_STATE_AN_ENABLE;
1937 delta = ap->cur_time - ap->link_time;
1938 if (delta > ANEG_STATE_SETTLE_TIME) {
1939 /* XXX another gem from the Broadcom driver :( */
1940 ap->state = ANEG_STATE_LINK_OK;
1944 case ANEG_STATE_LINK_OK:
1945 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1949 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1950 /* ??? unimplemented */
1953 case ANEG_STATE_NEXT_PAGE_WAIT:
1954 /* ??? unimplemented */
/* Run fiber autonegotiation and report the link partner's ability
 * flags (MR_* bits) through *flags.  Two paths:
 *  - hardware autoneg (TG3_FLG2_HW_AUTONEG): read the SG_DIG
 *    status register and translate its pause/complete/fault bits;
 *  - software autoneg: drive tg3_fiber_aneg_smachine() for up to
 *    195000 ticks, then require AN_COMPLETE + LINK_OK +
 *    LP full-duplex before reporting success.
 * Return value is the success/failure indicator consumed by
 * tg3_setup_fiber_phy() (exact return lines elided).
 * NOTE(review): sampled extract -- `int res`/`tick` declarations,
 * udelay()s, returns and closing braces are elided; code lines are
 * byte-identical to the source.
 */
1965 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
1969 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
1972 dig_status = tr32(SG_DIG_STATUS);
1974 if (dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
1975 *flags |= MR_LP_ADV_ASYM_PAUSE;
1976 if (dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
1977 *flags |= MR_LP_ADV_SYM_PAUSE;
1979 if ((dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1980 !(dig_status & (SG_DIG_AUTONEG_ERROR |
1981 SG_DIG_PARTNER_FAULT_MASK)))
/* Software autoneg: prime the MAC for GMII + config sending,
 * then crank the state machine until done/failed or timeout.
 */
1984 struct tg3_fiber_aneginfo aninfo;
1985 int status = ANEG_FAILED;
1989 tw32_f(MAC_TX_AUTO_NEG, 0);
1991 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1992 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1995 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1998 memset(&aninfo, 0, sizeof(aninfo));
1999 aninfo.flags |= MR_AN_ENABLE;
2000 aninfo.state = ANEG_STATE_UNKNOWN;
2001 aninfo.cur_time = 0;
2003 while (++tick < 195000) {
2004 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2005 if (status == ANEG_DONE || status == ANEG_FAILED)
2011 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2012 tw32_f(MAC_MODE, tp->mac_mode);
2015 *flags = aninfo.flags;
2017 if (status == ANEG_DONE &&
2018 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2019 MR_LP_ADV_FULL_DUPLEX)))
/* Bring up the fiber (TBI/SerDes) link: force the MAC into TBI
 * mode, reset/configure the SerDes PHY when needed, run hardware
 * or software autonegotiation (or force 1000FD), program flow
 * control, update carrier state and LEDs, and report link changes.
 * Fiber links are always 1000 Mb/s full duplex when up.
 * NOTE(review): sampled extract -- declarations (orig_pause_cfg,
 * i, flags, remote_adv init), udelay()s, several else branches,
 * returns and closing braces are elided.  Code lines are
 * byte-identical to the source.
 */
2026 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2029 u16 orig_active_speed;
2030 u8 orig_active_duplex;
2031 int current_link_up;
/* Remember previous pause/speed/duplex so we only log a link
 * report when something actually changed (see end of function).
 */
2035 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2036 TG3_FLAG_TX_PAUSE));
2037 orig_active_speed = tp->link_config.active_speed;
2038 orig_active_duplex = tp->link_config.active_duplex;
2040 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2041 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2042 tw32_f(MAC_MODE, tp->mac_mode);
2045 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
2046 /* Allow time for the hardware to auto-negotiate (195ms) */
2047 unsigned int tick = 0;
2049 while (++tick < 195000) {
2050 if (tr32(SG_DIG_STATUS) & SG_DIG_AUTONEG_COMPLETE)
2055 printk(KERN_INFO PFX "%s: HW autoneg failed !\n",
2059 /* Reset when initting first time or we have a link. */
2060 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
2061 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2062 /* Set PLL lock range. */
2063 tg3_writephy(tp, 0x16, 0x8007);
2066 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2068 /* Wait for reset to complete. */
2069 /* XXX schedule_timeout() ... */
2070 for (i = 0; i < 500; i++)
2073 /* Config mode; select PMA/Ch 1 regs. */
2074 tg3_writephy(tp, 0x10, 0x8411);
2076 /* Enable auto-lock and comdet, select txclk for tx. */
2077 tg3_writephy(tp, 0x11, 0x0a10);
2079 tg3_writephy(tp, 0x18, 0x00a0);
2080 tg3_writephy(tp, 0x16, 0x41ff);
2082 /* Assert and deassert POR. */
2083 tg3_writephy(tp, 0x13, 0x0400);
2085 tg3_writephy(tp, 0x13, 0x0000);
2087 tg3_writephy(tp, 0x11, 0x0a50);
2089 tg3_writephy(tp, 0x11, 0x0a10);
2091 /* Wait for signal to stabilize */
2092 /* XXX schedule_timeout() ... */
2093 for (i = 0; i < 15000; i++)
2096 /* Deselect the channel register so we can read the PHYID
2099 tg3_writephy(tp, 0x10, 0x8011);
2102 /* Enable link change interrupt unless serdes polling. */
2103 if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
2104 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2106 tw32_f(MAC_EVENT, 0);
2109 current_link_up = 0;
2110 if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
2111 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Autoneg succeeded: derive pause settings from the link
 * partner's advertised flags and enable flow control.
 */
2114 if (fiber_autoneg(tp, &flags)) {
2115 u32 local_adv, remote_adv;
2117 local_adv = ADVERTISE_PAUSE_CAP;
2119 if (flags & MR_LP_ADV_SYM_PAUSE)
2120 remote_adv |= LPA_PAUSE_CAP;
2121 if (flags & MR_LP_ADV_ASYM_PAUSE)
2122 remote_adv |= LPA_PAUSE_ASYM;
2124 tg3_setup_flow_control(tp, local_adv, remote_adv);
2127 TG3_FLAG_GOT_SERDES_FLOWCTL;
2128 current_link_up = 1;
/* Autoneg failed: wait (up to 60 iterations) for the
 * sync/config-changed status to quiesce, then accept the link
 * anyway if PCS still reports sync.
 */
2130 for (i = 0; i < 60; i++) {
2133 (MAC_STATUS_SYNC_CHANGED |
2134 MAC_STATUS_CFG_CHANGED));
2136 if ((tr32(MAC_STATUS) &
2137 (MAC_STATUS_SYNC_CHANGED |
2138 MAC_STATUS_CFG_CHANGED)) == 0)
2141 if (current_link_up == 0 &&
2142 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2143 current_link_up = 1;
2146 /* Forcing 1000FD link up. */
2147 current_link_up = 1;
2148 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2151 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2153 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2154 tw32_f(MAC_MODE, tp->mac_mode);
/* Clear the link-change bit in the shared status block so the
 * poll path doesn't re-run PHY setup for this event.
 */
2157 tp->hw_status->status =
2158 (SD_STATUS_UPDATED |
2159 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2161 for (i = 0; i < 100; i++) {
2164 (MAC_STATUS_SYNC_CHANGED |
2165 MAC_STATUS_CFG_CHANGED));
2167 if ((tr32(MAC_STATUS) &
2168 (MAC_STATUS_SYNC_CHANGED |
2169 MAC_STATUS_CFG_CHANGED)) == 0)
2173 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
2174 current_link_up = 0;
2176 if (current_link_up == 1) {
2177 tp->link_config.active_speed = SPEED_1000;
2178 tp->link_config.active_duplex = DUPLEX_FULL;
2179 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2180 LED_CTRL_LNKLED_OVERRIDE |
2181 LED_CTRL_1000MBPS_ON));
2183 tp->link_config.active_speed = SPEED_INVALID;
2184 tp->link_config.active_duplex = DUPLEX_INVALID;
2185 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2186 LED_CTRL_LNKLED_OVERRIDE |
2187 LED_CTRL_TRAFFIC_OVERRIDE));
2190 if (current_link_up != netif_carrier_ok(tp->dev)) {
2191 if (current_link_up)
2192 netif_carrier_on(tp->dev);
2194 netif_carrier_off(tp->dev);
2195 tg3_link_report(tp);
/* Carrier unchanged: still report if pause config, speed, or
 * duplex moved under us.
 */
2198 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2200 if (orig_pause_cfg != now_pause_cfg ||
2201 orig_active_speed != tp->link_config.active_speed ||
2202 orig_active_duplex != tp->link_config.active_duplex)
2203 tg3_link_report(tp);
2206 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
2207 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
2209 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
2210 tw32_f(MAC_MODE, tp->mac_mode);
/* Top-level PHY setup dispatcher: run the fiber or copper setup
 * path based on the PHY ID, then program MAC transmit inter-packet
 * gap / slot time (1000-half gets the larger 0xff slot time) and
 * enable or disable statistics coalescing depending on carrier
 * state (skipped on 5705/5750, which lack that register per the
 * ASIC_REV checks below).
 * NOTE(review): sampled extract -- `int err;`, else branch brace
 * and `return err;` are elided; code lines byte-identical.
 */
2218 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2222 if (tp->phy_id == PHY_ID_SERDES) {
2223 err = tg3_setup_fiber_phy(tp, force_reset);
2225 err = tg3_setup_copper_phy(tp, force_reset);
2228 if (tp->link_config.active_speed == SPEED_1000 &&
2229 tp->link_config.active_duplex == DUPLEX_HALF)
2230 tw32(MAC_TX_LENGTHS,
2231 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2232 (6 << TX_LENGTHS_IPG_SHIFT) |
2233 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2235 tw32(MAC_TX_LENGTHS,
2236 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2237 (6 << TX_LENGTHS_IPG_SHIFT) |
2238 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2240 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2241 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2242 if (netif_carrier_ok(tp->dev)) {
2243 tw32(HOSTCC_STAT_COAL_TICKS,
2244 DEFAULT_STAT_COAL_TICKS);
2246 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2253 /* Tigon3 never reports partial packet sends. So we do not
2254 * need special logic to handle SKBs that have not had all
2255 * of their frags sent yet, like SunGEM does.
/* TX completion: walk tx_buffers from the software consumer index
 * up to the hardware's reported consumer index, unmapping the head
 * descriptor (pci_unmap_single) and each fragment descriptor
 * (pci_unmap_page) of every completed skb, then free the skb and
 * wake the queue once enough ring space is available.
 * NOTE(review): sampled extract -- unmap length/direction args,
 * `ri->skb = NULL`, BUG() branches and closing braces are elided;
 * code lines byte-identical to the source.
 */
2257 static void tg3_tx(struct tg3 *tp)
2259 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2260 u32 sw_idx = tp->tx_cons;
2262 while (sw_idx != hw_idx) {
2263 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2264 struct sk_buff *skb = ri->skb;
/* A NULL skb here means ring bookkeeping is corrupt (elided
 * error path).
 */
2267 if (unlikely(skb == NULL))
2270 pci_unmap_single(tp->pdev,
2271 pci_unmap_addr(ri, mapping),
2277 sw_idx = NEXT_TX(sw_idx);
2279 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2280 if (unlikely(sw_idx == hw_idx))
2283 ri = &tp->tx_buffers[sw_idx];
2284 if (unlikely(ri->skb != NULL))
2287 pci_unmap_page(tp->pdev,
2288 pci_unmap_addr(ri, mapping),
2289 skb_shinfo(skb)->frags[i].size,
2292 sw_idx = NEXT_TX(sw_idx);
/* _irq variant: this runs in interrupt context. */
2295 dev_kfree_skb_irq(skb);
2298 tp->tx_cons = sw_idx;
2300 if (netif_queue_stopped(tp->dev) &&
2301 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2302 netif_wake_queue(tp->dev);
2305 /* Returns size of skb allocated or < 0 on error.
2307 * We only need to fill in the address because the other members
2308 * of the RX descriptor are invariant, see tg3_init_rings.
2310 * Note the purposeful assymetry of cpu vs. chip accesses. For
2311 * posting buffers we only dirty the first cache line of the RX
2312 * descriptor (containing the address). Whereas for the RX status
2313 * buffers the cpu only reads the last cacheline of the RX descriptor
2314 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* NOTE(review): sampled extract -- the default: switch case, the
 * dev_alloc_skb() failure return, `skb->dev`/`map->skb`
 * assignments, the final `return skb_size;` and closing brace are
 * elided; code lines byte-identical to the source.
 */
2316 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2317 int src_idx, u32 dest_idx_unmasked)
2319 struct tg3_rx_buffer_desc *desc;
2320 struct ring_info *map, *src_map;
2321 struct sk_buff *skb;
2323 int skb_size, dest_idx;
/* Select standard vs. jumbo ring; each has its own descriptor
 * array, buffer bookkeeping array, and buffer size.
 */
2326 switch (opaque_key) {
2327 case RXD_OPAQUE_RING_STD:
2328 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2329 desc = &tp->rx_std[dest_idx];
2330 map = &tp->rx_std_buffers[dest_idx];
2332 src_map = &tp->rx_std_buffers[src_idx];
2333 skb_size = RX_PKT_BUF_SZ;
2336 case RXD_OPAQUE_RING_JUMBO:
2337 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2338 desc = &tp->rx_jumbo[dest_idx];
2339 map = &tp->rx_jumbo_buffers[dest_idx];
2341 src_map = &tp->rx_jumbo_buffers[src_idx];
2342 skb_size = RX_JUMBO_PKT_BUF_SZ;
2349 /* Do not overwrite any of the map or rp information
2350 * until we are sure we can commit to a new buffer.
2352 * Callers depend upon this behavior and assume that
2353 * we leave everything unchanged if we fail.
2355 skb = dev_alloc_skb(skb_size);
2360 skb_reserve(skb, tp->rx_offset);
2362 mapping = pci_map_single(tp->pdev, skb->data,
2363 skb_size - tp->rx_offset,
2364 PCI_DMA_FROMDEVICE);
2367 pci_unmap_addr_set(map, mapping, mapping);
2369 if (src_map != NULL)
2370 src_map->skb = NULL;
/* Publish the 64-bit DMA address into the descriptor the chip
 * reads; split into hi/lo 32-bit halves.
 */
2372 desc->addr_hi = ((u64)mapping >> 32);
2373 desc->addr_lo = ((u64)mapping & 0xffffffff);
2378 /* We only need to move over in the address because the other
2379 * members of the RX descriptor are invariant. See notes above
2380 * tg3_alloc_rx_skb for full details.
/* Recycle an RX buffer in place: copy the skb pointer, DMA
 * mapping, and descriptor address from the source ring slot to the
 * destination slot (std or jumbo ring), then clear the source
 * slot's skb so the buffer has exactly one owner.
 * NOTE(review): sampled extract -- the default: switch case and
 * closing braces are elided; code lines byte-identical.
 */
2382 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2383 int src_idx, u32 dest_idx_unmasked)
2385 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2386 struct ring_info *src_map, *dest_map;
2389 switch (opaque_key) {
2390 case RXD_OPAQUE_RING_STD:
2391 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2392 dest_desc = &tp->rx_std[dest_idx];
2393 dest_map = &tp->rx_std_buffers[dest_idx];
2394 src_desc = &tp->rx_std[src_idx];
2395 src_map = &tp->rx_std_buffers[src_idx];
2398 case RXD_OPAQUE_RING_JUMBO:
2399 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2400 dest_desc = &tp->rx_jumbo[dest_idx];
2401 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2402 src_desc = &tp->rx_jumbo[src_idx];
2403 src_map = &tp->rx_jumbo_buffers[src_idx];
2410 dest_map->skb = src_map->skb;
2411 pci_unmap_addr_set(dest_map, mapping,
2412 pci_unmap_addr(src_map, mapping));
2413 dest_desc->addr_hi = src_desc->addr_hi;
2414 dest_desc->addr_lo = src_desc->addr_lo;
2416 src_map->skb = NULL;
2419 #if TG3_VLAN_TAG_USED
/* Hand a received skb with its hardware-extracted VLAN tag to the
 * VLAN acceleration path; compiled only when 802.1Q support is on.
 */
2420 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2422 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2426 /* The RX ring scheme is composed of multiple rings which post fresh
2427 * buffers to the chip, and one special ring the chip uses to report
2428 * status back to the host.
2430 * The special ring reports the status of received packets to the
2431 * host. The chip does not write into the original descriptor the
2432 * RX buffer was obtained from. The chip simply takes the original
2433 * descriptor as provided by the host, updates the status and length
2434 * field, then writes this into the next status ring entry.
2436 * Each ring the host uses to post buffers to the chip is described
2437 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2438 * it is first placed into the on-chip ram. When the packet's length
2439 * is known, it walks down the TG3_BDINFO entries to select the ring.
2440 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2441 * which is within the range of the new packet's length is chosen.
2443 * The "separate ring for rx status" scheme may sound queer, but it makes
2444 * sense from a cache coherency perspective. If only the host writes
2445 * to the buffer post rings, and only the chip writes to the rx status
2446 * rings, then cache lines never move beyond shared-modified state.
2447 * If both the host and chip were to write into the same ring, cache line
2448 * eviction could occur since both entities want it in an exclusive state.
/* Process up to `budget` packets from the RX status ring: for each
 * entry, either hand the original buffer up (large packets, after
 * posting a replacement via tg3_alloc_rx_skb) or copy into a fresh
 * small skb and recycle the original (len <= RX_COPY_THRESHOLD).
 * Returns the number of packets processed (return line elided).
 * NOTE(review): sampled extract -- declarations (hw_idx, sw_idx,
 * len, received, work_mask), the memory barrier, error-path
 * labels, budget decrement and closing braces are elided; code
 * lines byte-identical to the source.
 */
2450 static int tg3_rx(struct tg3 *tp, int budget)
2453 u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2457 hw_idx = tp->hw_status->idx[0].rx_producer;
2459 * We need to order the read of hw_idx and the read of
2460 * the opaque cookie.
2463 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2466 while (sw_idx != hw_idx && budget > 0) {
2467 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2469 struct sk_buff *skb;
2470 dma_addr_t dma_addr;
2471 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie encodes which posting ring (std/jumbo) and
 * which slot the buffer came from.
 */
2473 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2474 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2475 if (opaque_key == RXD_OPAQUE_RING_STD) {
2476 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2478 skb = tp->rx_std_buffers[desc_idx].skb;
2479 post_ptr = &tp->rx_std_ptr;
2480 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2481 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2483 skb = tp->rx_jumbo_buffers[desc_idx].skb;
2484 post_ptr = &tp->rx_jumbo_ptr;
2487 goto next_pkt_nopost;
2490 work_mask |= opaque_key;
/* Errored frame (except the harmless odd-nibble MII case):
 * recycle the buffer and count it as dropped.
 */
2492 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2493 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2495 tg3_recycle_rx(tp, opaque_key,
2496 desc_idx, *post_ptr);
2498 /* Other statistics kept track of by card. */
2499 tp->net_stats.rx_dropped++;
2503 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
/* Large packet: replace the ring buffer with a new skb and pass
 * the original (already DMA'd-into) skb up the stack.
 */
2505 if (len > RX_COPY_THRESHOLD) {
2508 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2509 desc_idx, *post_ptr);
2513 pci_unmap_single(tp->pdev, dma_addr,
2514 skb_size - tp->rx_offset,
2515 PCI_DMA_FROMDEVICE);
/* Small packet: copy into a fresh skb (+2 reserve keeps the IP
 * header word-aligned) and recycle the original ring buffer.
 */
2519 struct sk_buff *copy_skb;
2521 tg3_recycle_rx(tp, opaque_key,
2522 desc_idx, *post_ptr);
2524 copy_skb = dev_alloc_skb(len + 2);
2525 if (copy_skb == NULL)
2526 goto drop_it_no_recycle;
2528 copy_skb->dev = tp->dev;
2529 skb_reserve(copy_skb, 2);
2530 skb_put(copy_skb, len);
2531 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2532 memcpy(copy_skb->data, skb->data, len);
2533 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2535 /* We'll reuse the original ring buffer. */
/* Trust the chip's TCP/UDP checksum only when the flag is on and
 * the hardware-computed value is 0xffff.
 */
2539 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2540 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2541 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2542 >> RXD_TCPCSUM_SHIFT) == 0xffff))
2543 skb->ip_summed = CHECKSUM_UNNECESSARY;
2545 skb->ip_summed = CHECKSUM_NONE;
2547 skb->protocol = eth_type_trans(skb, tp->dev);
2548 #if TG3_VLAN_TAG_USED
2549 if (tp->vlgrp != NULL &&
2550 desc->type_flags & RXD_FLAG_VLAN) {
2551 tg3_vlan_rx(tp, skb,
2552 desc->err_vlan & RXD_VLAN_MASK);
2555 netif_receive_skb(skb);
2557 tp->dev->last_rx = jiffies;
2565 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2568 /* ACK the status ring. */
2569 tp->rx_rcb_ptr = rx_rcb_ptr;
2570 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2571 (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2573 /* Refill RX ring(s). */
2574 if (work_mask & RXD_OPAQUE_RING_STD) {
2575 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2576 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2579 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2580 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2581 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/* NAPI poll callback (old dev->poll interface): handle link-change
 * events, run TX completion under tx_lock, then run the RX path
 * within the NAPI budget.  Returns 0 when all work is done (after
 * re-enabling interrupts and completing the poll) and 1 to stay on
 * the poll list.
 * NOTE(review): sampled extract -- the `done` declaration/
 * computation, the tg3_tx() call under tx_lock, and closing braces
 * are elided; code lines byte-identical to the source.
 */
2588 static int tg3_poll(struct net_device *netdev, int *budget)
2590 struct tg3 *tp = netdev_priv(netdev);
2591 struct tg3_hw_status *sblk = tp->hw_status;
2592 unsigned long flags;
2595 spin_lock_irqsave(&tp->lock, flags);
2597 /* handle link change and other phy events */
2598 if (!(tp->tg3_flags &
2599 (TG3_FLAG_USE_LINKCHG_REG |
2600 TG3_FLAG_POLL_SERDES))) {
/* Clear the link-change bit before re-running PHY setup so the
 * event is consumed exactly once.
 */
2601 if (sblk->status & SD_STATUS_LINK_CHG) {
2602 sblk->status = SD_STATUS_UPDATED |
2603 (sblk->status & ~SD_STATUS_LINK_CHG);
2604 tg3_setup_phy(tp, 0);
2608 /* run TX completion thread */
2609 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2610 spin_lock(&tp->tx_lock);
2612 spin_unlock(&tp->tx_lock);
2615 spin_unlock_irqrestore(&tp->lock, flags);
2617 /* run RX thread, within the bounds set by NAPI.
2618 * All RX "locking" is done by ensuring outside
2619 * code synchronizes with dev->poll()
/* Charge RX work against both the global budget and this
 * device's quota, per the 2.6-era NAPI contract.
 */
2622 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2623 int orig_budget = *budget;
2626 if (orig_budget > netdev->quota)
2627 orig_budget = netdev->quota;
2629 work_done = tg3_rx(tp, orig_budget);
2631 *budget -= work_done;
2632 netdev->quota -= work_done;
2634 if (work_done >= orig_budget)
2638 /* if no more work, tell net stack and NIC we're done */
2640 spin_lock_irqsave(&tp->lock, flags);
2641 __netif_rx_complete(netdev);
2642 tg3_enable_ints(tp);
2643 spin_unlock_irqrestore(&tp->lock, flags);
2646 return (done ? 0 : 1);
/* Quick check, from interrupt context, whether the status block
 * advertises any work for NAPI: a pending link-change event (when the
 * status block is the link-event source) or TX/RX ring index movement.
 * Returns non-zero when tg3_poll() should be scheduled.
 * NOTE(review): sampled excerpt — the 'work_exists = 1' assignments and
 * the final return statement fall between the visible lines.
 */
2649 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2651 struct tg3_hw_status *sblk = tp->hw_status;
2652 unsigned int work_exists = 0;
2654 /* check for phy events */
2655 if (!(tp->tg3_flags &
2656 (TG3_FLAG_USE_LINKCHG_REG |
2657 TG3_FLAG_POLL_SERDES))) {
2658 if (sblk->status & SD_STATUS_LINK_CHG)
2661 /* check for RX/TX work to do */
2662 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2663 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
/* Hard-IRQ handler.  If the status block shows an update, ack the
 * interrupt via mailbox 0 (a non-zero write also masks further chip
 * IRQs while we are "in handler"), flush the PCI write, then either
 * schedule NAPI (work pending) or re-enable interrupts (possibly a
 * shared-IRQ false alarm).  Returns IRQ_HANDLED/IRQ_NONE accordingly.
 */
2669 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2671 struct net_device *dev = dev_id;
2672 struct tg3 *tp = netdev_priv(dev);
2673 struct tg3_hw_status *sblk = tp->hw_status;
2674 unsigned long flags;
2675 unsigned int handled = 1;
2677 spin_lock_irqsave(&tp->lock, flags);
2679 if (sblk->status & SD_STATUS_UPDATED) {
2681 * writing any value to intr-mbox-0 clears PCI INTA# and
2682 * chip-internal interrupt pending events.
2683 * writing non-zero to intr-mbox-0 additional tells the
2684 * NIC to stop sending us irqs, engaging "in-intr-handler"
2687 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2690 * Flush PCI write. This also guarantees that our
2691 * status block has been flushed to host memory.
2693 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2694 sblk->status &= ~SD_STATUS_UPDATED;
2696 if (likely(tg3_has_work(dev, tp)))
2697 netif_rx_schedule(dev); /* schedule NAPI poll */
2699 /* no work, shared interrupt perhaps? re-enable
2700 * interrupts, and flush that PCI write
2702 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2704 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2706 } else { /* shared interrupt */
/* Status block untouched: not our interrupt (handled set to 0 in
 * a line elided from this excerpt). */
2710 spin_unlock_irqrestore(&tp->lock, flags);
2712 return IRQ_RETVAL(handled);
2715 static int tg3_init_hw(struct tg3 *);
2716 static int tg3_halt(struct tg3 *);
/* netpoll hook: synthesize an interrupt so netconsole/kgdboe can make
 * progress with normal interrupt delivery disabled. */
2718 #ifdef CONFIG_NET_POLL_CONTROLLER
2719 static void tg3_poll_controller(struct net_device *dev)
2721 tg3_interrupt(dev->irq, dev, NULL);
/* Workqueue handler scheduled by tg3_tx_timeout(): resets and
 * reinitializes the chip under tp->lock + tp->tx_lock, then restarts
 * the netif queue.  If TG3_FLG2_RESTART_TIMER was set, the periodic
 * timer is rearmed one jiffy from now.
 * NOTE(review): sampled excerpt — the actual halt/init calls between
 * the flag handling and the unlocks are not visible here.
 */
2725 static void tg3_reset_task(void *_data)
2727 struct tg3 *tp = _data;
2728 unsigned int restart_timer;
2732 spin_lock_irq(&tp->lock);
2733 spin_lock(&tp->tx_lock);
/* Latch and clear the restart-timer request atomically under the locks. */
2735 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2736 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2741 spin_unlock(&tp->tx_lock);
2742 spin_unlock_irq(&tp->lock);
2744 tg3_netif_start(tp);
2747 mod_timer(&tp->timer, jiffies + 1);
/* dev->tx_timeout hook: log the stall and defer the heavyweight chip
 * reset to process context via the reset_task workqueue entry. */
2750 static void tg3_tx_timeout(struct net_device *dev)
2752 struct tg3 *tp = netdev_priv(dev);
2754 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2757 schedule_work(&tp->reset_task);
2760 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
/* Workaround for the hardware bug where a DMA buffer crossing a 4GB
 * boundary is mishandled: linearize the skb into a freshly-allocated
 * copy (skb_copy), map and queue the copy as a single descriptor, then
 * unmap and release the original per-fragment ring entries from *start
 * up to last_plus_one.  *start is advanced past the new descriptor.
 * NOTE(review): sampled excerpt — the allocation-failure path, the
 * 'entry'/'i' initialization and the final return are elided here.
 */
2762 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2763 u32 guilty_entry, int guilty_len,
2764 u32 last_plus_one, u32 *start, u32 mss)
2766 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2767 dma_addr_t new_addr;
2776 /* New SKB is guaranteed to be linear. */
2778 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2780 tg3_set_txd(tp, entry, new_addr, new_skb->len,
2781 (skb->ip_summed == CHECKSUM_HW) ?
2782 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2783 *start = NEXT_TX(entry);
2785 /* Now clean up the sw ring entries. */
2787 while (entry != last_plus_one) {
/* First iteration unmaps the linear head, later ones frag i-1. */
2791 len = skb_headlen(skb);
2793 len = skb_shinfo(skb)->frags[i-1].size;
2794 pci_unmap_single(tp->pdev,
2795 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2796 len, PCI_DMA_TODEVICE);
/* Only the first cleaned entry keeps ownership of the new skb. */
2798 tp->tx_buffers[entry].skb = new_skb;
2799 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2801 tp->tx_buffers[entry].skb = NULL;
2803 entry = NEXT_TX(entry);
/* Fill one TX buffer descriptor.  The low bit of the packed
 * mss_and_is_end argument marks the final fragment (sets
 * TXD_FLAG_END); the remaining bits carry the TSO MSS.  Host-resident
 * rings (TG3_FLAG_HOST_TXDS) are written directly in memory; otherwise
 * the descriptor lives in NIC SRAM and is programmed via writel() PIO,
 * caching prev_vlan_tag to skip a redundant PIO write.
 */
2811 static void tg3_set_txd(struct tg3 *tp, int entry,
2812 dma_addr_t mapping, int len, u32 flags,
2815 int is_end = (mss_and_is_end & 0x1);
2816 u32 mss = (mss_and_is_end >> 1);
2820 flags |= TXD_FLAG_END;
2821 if (flags & TXD_FLAG_VLAN) {
/* VLAN tag is carried in the upper 16 bits of the flags argument. */
2822 vlan_tag = flags >> 16;
2825 vlan_tag |= (mss << TXD_MSS_SHIFT);
2826 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2827 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2829 txd->addr_hi = ((u64) mapping >> 32);
2830 txd->addr_lo = ((u64) mapping & 0xffffffff);
2831 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2832 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2834 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2839 NIC_SRAM_TX_BUFFER_DESC);
2840 txd += (entry * TXD_SIZE);
2842 /* Save some PIOs */
/* High 32 bits only exist on 64-bit DMA configurations. */
2843 if (sizeof(dma_addr_t) != sizeof(u32))
2844 writel(((u64) mapping >> 32),
2845 txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2847 writel(((u64) mapping & 0xffffffff),
2848 txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2849 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2850 if (txr->prev_vlan_tag != vlan_tag) {
2851 writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2852 txr->prev_vlan_tag = vlan_tag;
/* Returns non-zero when a DMA buffer of 'len' bytes starting at
 * 'mapping' would wrap across a 4GB address boundary — the condition
 * the tigon3_4gb_hwbug_workaround() exists for.  The unsigned sum
 * base + len + 8 overflowing (becoming < base) detects the wrap; the
 * base > 0xffffdcc0 pre-check restricts the test to the last few KB
 * before the boundary. */
2857 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2859 u32 base = (u32) mapping & 0xffffffff;
2861 return ((base > 0xffffdcc0) &&
2862 (base + len + 8 < base));
/* hard_start_xmit: queue one skb on the TX ring.
 *
 * Under tp->tx_lock (irqsave, see the deadlock discussion kept from
 * the original below): checks ring space, computes checksum/TSO/VLAN
 * descriptor flags, DMA-maps the linear head and every page fragment,
 * writes the descriptors, applies the 4GB-boundary workaround if any
 * mapping tripped tg3_4g_overflow_test(), then kicks the producer
 * mailbox and stops the queue when the ring is nearly full.
 * NOTE(review): sampled excerpt — several statements (base_flags/mss
 * initialization, the drop path, final return) are elided here.
 */
2865 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2867 struct tg3 *tp = netdev_priv(dev);
2870 u32 len, entry, base_flags, mss;
2871 int would_hit_hwbug;
2872 unsigned long flags;
2874 len = skb_headlen(skb);
2876 /* No BH disabling for tx_lock here. We are running in BH disabled
2877 * context and TX reclaim runs via tp->poll inside of a software
2878 * interrupt. Rejoice!
2880 * Actually, things are not so simple. If we are to take a hw
2881 * IRQ here, we can deadlock, consider:
2890 * spin on tp->tx_lock
2892 * So we really do need to disable interrupts when taking
2895 spin_lock_irqsave(&tp->tx_lock, flags);
2897 /* This is a hard error, log it. */
2898 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2899 netif_stop_queue(dev);
2900 spin_unlock_irqrestore(&tp->tx_lock, flags);
2901 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2906 entry = tp->tx_prod;
2908 if (skb->ip_summed == CHECKSUM_HW)
2909 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2910 #if TG3_TSO_SUPPORT != 0
2912 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2913 (mss = skb_shinfo(skb)->tso_size) != 0) {
2914 int tcp_opt_len, ip_tcp_len;
2916 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2917 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2919 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2920 TXD_FLAG_CPU_POST_DMA);
/* Pre-cook the IP header for hardware segmentation: zero the
 * checksum and store the per-segment total length. */
2922 skb->nh.iph->check = 0;
2923 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
2924 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* 5705 encodes IP/TCP header-length hints in the MSS field;
 * other chips put them in base_flags instead. */
2928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2929 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2932 tsflags = ((skb->nh.iph->ihl - 5) +
2933 (tcp_opt_len >> 2));
2934 mss |= (tsflags << 11);
2937 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2940 tsflags = ((skb->nh.iph->ihl - 5) +
2941 (tcp_opt_len >> 2));
2942 base_flags |= tsflags << 12;
2949 #if TG3_VLAN_TAG_USED
2950 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2951 base_flags |= (TXD_FLAG_VLAN |
2952 (vlan_tx_tag_get(skb) << 16));
2955 /* Queue skb data, a.k.a. the main skb fragment. */
2956 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2958 tp->tx_buffers[entry].skb = skb;
2959 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2961 would_hit_hwbug = 0;
/* Stored as entry+1 so that 0 can mean "no bug hit". */
2963 if (tg3_4g_overflow_test(mapping, len))
2964 would_hit_hwbug = entry + 1;
2966 tg3_set_txd(tp, entry, mapping, len, base_flags,
2967 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2969 entry = NEXT_TX(entry);
2971 /* Now loop through additional data fragments, and queue them. */
2972 if (skb_shinfo(skb)->nr_frags > 0) {
2973 unsigned int i, last;
2975 last = skb_shinfo(skb)->nr_frags - 1;
2976 for (i = 0; i <= last; i++) {
2977 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2980 mapping = pci_map_page(tp->pdev,
2983 len, PCI_DMA_TODEVICE);
2985 tp->tx_buffers[entry].skb = NULL;
2986 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2988 if (tg3_4g_overflow_test(mapping, len)) {
2989 /* Only one should match. */
2990 if (would_hit_hwbug)
2992 would_hit_hwbug = entry + 1;
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
2996 tg3_set_txd(tp, entry, mapping, len,
2997 base_flags, (i == last)|(mss << 1));
2999 tg3_set_txd(tp, entry, mapping, len,
3000 base_flags, (i == last));
3002 entry = NEXT_TX(entry);
3006 if (would_hit_hwbug) {
3007 u32 last_plus_one = entry;
3009 unsigned int len = 0;
3011 would_hit_hwbug -= 1;
/* Rewind to the first descriptor written for this skb. */
3012 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3013 entry &= (TG3_TX_RING_SIZE - 1);
3016 while (entry != last_plus_one) {
3018 len = skb_headlen(skb);
3020 len = skb_shinfo(skb)->frags[i-1].size;
3022 if (entry == would_hit_hwbug)
3026 entry = NEXT_TX(entry);
3030 /* If the workaround fails due to memory/mapping
3031 * failure, silently drop this packet.
3033 if (tigon3_4gb_hwbug_workaround(tp, skb,
3042 /* Packets are ready, update Tx producer idx local and on card. */
3043 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3044 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
3045 TG3_64BIT_REG_LOW), entry);
3047 /* First, make sure tg3 sees last descriptor fully
3050 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3051 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
3053 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3054 TG3_64BIT_REG_LOW), entry);
3057 tp->tx_prod = entry;
3058 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3059 netif_stop_queue(dev);
3062 spin_unlock_irqrestore(&tp->tx_lock, flags);
3064 dev->trans_start = jiffies;
/* Record the new MTU and toggle the jumbo-frame flag accordingly
 * (frames above the standard 1500-byte ETH_DATA_LEN need the jumbo
 * RX ring).  The dev->mtu assignment itself is elided in this
 * sampled excerpt. */
3069 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3074 if (new_mtu > ETH_DATA_LEN)
3075 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3077 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
/* dev->change_mtu hook.  Rejects MTUs outside [TG3_MIN_MTU,
 * TG3_MAX_MTU(tp)] (the max is chip-dependent: 5705/5750 cannot do
 * jumbo frames, see the header macro).  If the interface is down just
 * record the value; otherwise reconfigure the chip under
 * tp->lock + tp->tx_lock and restart the netif queue. */
3080 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3082 struct tg3 *tp = netdev_priv(dev);
3084 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3087 if (!netif_running(dev)) {
3088 /* We'll just catch it later when the
3091 tg3_set_mtu(dev, tp, new_mtu);
3096 spin_lock_irq(&tp->lock);
3097 spin_lock(&tp->tx_lock);
/* The halt/re-init of the chip around this call is elided in this
 * sampled excerpt. */
3101 tg3_set_mtu(dev, tp, new_mtu);
3105 spin_unlock(&tp->tx_lock);
3106 spin_unlock_irq(&tp->lock);
3107 tg3_netif_start(tp);
3112 /* Free up pending packets in all rx/tx rings.
3114 * The chip has been shut down and the driver detached from
3115 * the networking, so no interrupts or new tx packets will
3116 * end up in the driver. tp->{tx,}lock is not held and we are not
3117 * in an interrupt context and thus may sleep.
3119 static void tg3_free_rings(struct tg3 *tp)
3121 struct ring_info *rxp;
/* Standard RX ring: unmap and free every posted skb. */
3124 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3125 rxp = &tp->rx_std_buffers[i];
3127 if (rxp->skb == NULL)
3129 pci_unmap_single(tp->pdev,
3130 pci_unmap_addr(rxp, mapping),
3131 RX_PKT_BUF_SZ - tp->rx_offset,
3132 PCI_DMA_FROMDEVICE);
3133 dev_kfree_skb_any(rxp->skb);
/* Jumbo RX ring: same treatment with the jumbo buffer size. */
3137 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3138 rxp = &tp->rx_jumbo_buffers[i];
3140 if (rxp->skb == NULL)
3142 pci_unmap_single(tp->pdev,
3143 pci_unmap_addr(rxp, mapping),
3144 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3145 PCI_DMA_FROMDEVICE);
3146 dev_kfree_skb_any(rxp->skb);
/* TX ring: an skb may span several entries (head + page frags), so
 * the loop index advances inside the body (elided lines) rather than
 * in the for statement. */
3150 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3151 struct tx_ring_info *txp;
3152 struct sk_buff *skb;
3155 txp = &tp->tx_buffers[i];
3163 pci_unmap_single(tp->pdev,
3164 pci_unmap_addr(txp, mapping),
3171 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3172 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3173 pci_unmap_page(tp->pdev,
3174 pci_unmap_addr(txp, mapping),
3175 skb_shinfo(skb)->frags[j].size,
3180 dev_kfree_skb_any(skb);
3184 /* Initialize tx/rx rings for packet processing.
3186 * The chip has been shut down and the driver detached from
3187 * the networking, so no interrupts or new tx packets will
3188 * end up in the driver. tp->{tx,}lock are held and thus
3191 static void tg3_init_rings(struct tg3 *tp)
3193 unsigned long start, end;
3196 /* Free up all the SKBs. */
3199 /* Zero out all descriptors. */
3200 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3201 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3202 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* Host-memory TX ring can be memset; NIC-SRAM TX ring must be
 * cleared word-by-word through the SRAM window. */
3204 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3205 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3209 NIC_SRAM_TX_BUFFER_DESC);
3210 end = start + TG3_TX_RING_BYTES;
3211 while (start < end) {
/* Reset the cached VLAN tags so tg3_set_txd() re-writes them. */
3215 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3216 tp->tx_buffers[i].prev_vlan_tag = 0;
3219 /* Initialize invariants of the rings, we only set this
3220 * stuff once. This works because the card does not
3221 * write into the rx buffer posting rings.
3223 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3224 struct tg3_rx_buffer_desc *rxd;
3226 rxd = &tp->rx_std[i];
3227 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3229 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3230 rxd->opaque = (RXD_OPAQUE_RING_STD |
3231 (i << RXD_OPAQUE_INDEX_SHIFT));
3234 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3235 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3236 struct tg3_rx_buffer_desc *rxd;
3238 rxd = &tp->rx_jumbo[i];
3239 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3241 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3243 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3244 (i << RXD_OPAQUE_INDEX_SHIFT));
3248 /* Now allocate fresh SKBs for each rx ring. */
3249 for (i = 0; i < tp->rx_pending; i++) {
3250 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3255 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3256 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3257 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
/* Release every DMA-consistent allocation made by
 * tg3_alloc_consistent(): the combined ring_info/tx_ring_info kmalloc
 * block, all descriptor rings, the status block and the statistics
 * block.  Each pointer is NULLed after freeing (some assignments fall
 * on lines elided from this sampled excerpt), making the function
 * safe to call on a partially-allocated tp. */
3265 * Must not be invoked with interrupt sources disabled and
3266 * the hardware shutdown down.
3268 static void tg3_free_consistent(struct tg3 *tp)
3270 if (tp->rx_std_buffers) {
3271 kfree(tp->rx_std_buffers);
3272 tp->rx_std_buffers = NULL;
3275 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3276 tp->rx_std, tp->rx_std_mapping);
3280 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3281 tp->rx_jumbo, tp->rx_jumbo_mapping);
3282 tp->rx_jumbo = NULL;
3285 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3286 tp->rx_rcb, tp->rx_rcb_mapping);
3290 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3291 tp->tx_ring, tp->tx_desc_mapping);
3294 if (tp->hw_status) {
3295 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3296 tp->hw_status, tp->status_mapping);
3297 tp->hw_status = NULL;
3300 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3301 tp->hw_stats, tp->stats_mapping);
3302 tp->hw_stats = NULL;
/* Allocate all DMA-consistent memory the driver needs: one kmalloc
 * block holding the std-RX, jumbo-RX and TX bookkeeping arrays
 * back-to-back, plus pci_alloc_consistent rings, the status block and
 * the stats block.  The TX descriptor ring is only host-allocated when
 * TG3_FLAG_HOST_TXDS is set (otherwise it lives in NIC SRAM).
 * On any failure everything already allocated is released via
 * tg3_free_consistent().  Returns 0 on success, negative errno
 * otherwise (the error-path lines are elided in this excerpt). */
3307 * Must not be invoked with interrupt sources disabled and
3308 * the hardware shutdown down. Can sleep.
3310 static int tg3_alloc_consistent(struct tg3 *tp)
3312 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3314 TG3_RX_JUMBO_RING_SIZE)) +
3315 (sizeof(struct tx_ring_info) *
3318 if (!tp->rx_std_buffers)
3321 memset(tp->rx_std_buffers, 0,
3322 (sizeof(struct ring_info) *
3324 TG3_RX_JUMBO_RING_SIZE)) +
3325 (sizeof(struct tx_ring_info) *
/* Carve the jumbo and TX bookkeeping arrays out of the same block. */
3328 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3329 tp->tx_buffers = (struct tx_ring_info *)
3330 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3332 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3333 &tp->rx_std_mapping);
3337 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3338 &tp->rx_jumbo_mapping);
3343 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3344 &tp->rx_rcb_mapping);
3348 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3349 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3350 &tp->tx_desc_mapping);
3355 tp->tx_desc_mapping = 0;
3358 tp->hw_status = pci_alloc_consistent(tp->pdev,
3360 &tp->status_mapping);
3364 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3365 sizeof(struct tg3_hw_stats),
3366 &tp->stats_mapping);
3370 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3371 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3380 #define MAX_WAIT_CNT 1000
3382 /* To stop a block, clear the enable bit and poll till it
3383 * clears. tp->lock is held.
/* Returns 0 on success, negative on timeout (MAX_WAIT_CNT polls).
 * On 5705/5750 some enable bits are read-only, so those chips are
 * handled specially (succeed without polling for most offsets). */
3385 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3398 /* We can't enable/disable these bits of the
3399 * 5705/5750, just say success.
3412 for (i = 0; i < MAX_WAIT_CNT; i++) {
3415 if ((val & enable_bit) == 0)
3419 if (i == MAX_WAIT_CNT) {
3420 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3421 "ofs=%lx enable_bit=%x\n",
3429 /* tp->lock is held. */
/* Orderly shutdown of the chip's datapath: disable interrupts and RX,
 * stop every receive/send/DMA functional block (ORing the individual
 * tg3_stop_block() results into err), disable the MAC TX engine and
 * wait for it to quiesce, reset the FTQ, then clear the host status
 * and statistics blocks.  Returns 0 if every block stopped cleanly. */
3430 static int tg3_abort_hw(struct tg3 *tp)
3434 tg3_disable_ints(tp);
3436 tp->rx_mode &= ~RX_MODE_ENABLE;
3437 tw32_f(MAC_RX_MODE, tp->rx_mode);
3440 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3441 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3442 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3443 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3444 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3445 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3447 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3448 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3449 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3450 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3451 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3452 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3453 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3457 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3458 tw32_f(MAC_MODE, tp->mac_mode);
3461 tp->tx_mode &= ~TX_MODE_ENABLE;
3462 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll until the TX engine acknowledges the disable. */
3464 for (i = 0; i < MAX_WAIT_CNT; i++) {
3466 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3469 if (i >= MAX_WAIT_CNT) {
3470 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3471 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3472 tp->dev->name, tr32(MAC_TX_MODE));
3476 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3477 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3478 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
/* Pulse the FTQ reset register to flush all internal queues. */
3480 tw32(FTQ_RESET, 0xffffffff);
3481 tw32(FTQ_RESET, 0x00000000);
3483 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3484 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3489 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3491 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3497 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration lock: request GNT1 and poll
 * up to 8000 times for the grant.  No-op when the chip has no NVRAM
 * interface (TG3_FLAG_NVRAM clear).  The timeout return path falls on
 * lines elided from this sampled excerpt. */
3498 static int tg3_nvram_lock(struct tg3 *tp)
3500 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3503 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3504 for (i = 0; i < 8000; i++) {
3505 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3515 /* tp->lock is held. */
/* Release the NVRAM software arbitration lock taken by
 * tg3_nvram_lock(); flushed write (tw32_f). */
3516 static void tg3_nvram_unlock(struct tg3 *tp)
3518 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3519 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3522 /* tp->lock is held. */
/* Before a chip reset: write the firmware-mailbox magic so the boot
 * code knows a driver is present, and — when the new ASF handshake is
 * in effect — post the driver-state word matching the reset 'kind'
 * (INIT / SHUTDOWN / SUSPEND) to the firmware state mailbox. */
3523 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3525 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3526 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3528 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3530 case RESET_KIND_INIT:
3531 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3535 case RESET_KIND_SHUTDOWN:
3536 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3540 case RESET_KIND_SUSPEND:
3541 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3551 /* tp->lock is held. */
/* After a chip reset completes: under the new ASF handshake, post the
 * corresponding "done" driver-state word (START_DONE / UNLOAD_DONE)
 * to the firmware state mailbox. */
3552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3554 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3556 case RESET_KIND_INIT:
3557 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3558 DRV_STATE_START_DONE);
3561 case RESET_KIND_SHUTDOWN:
3562 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3563 DRV_STATE_UNLOAD_DONE);
3572 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: when ASF is enabled,
 * post the driver-state word for the given reset 'kind' to the
 * firmware state mailbox. */
3573 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3575 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3577 case RESET_KIND_INIT:
3578 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3582 case RESET_KIND_SHUTDOWN:
3583 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3587 case RESET_KIND_SUSPEND:
3588 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3598 /* tp->lock is held. */
/* Full core-clock reset of the chip, followed by the long tail of
 * post-reset fixups: temporarily disables the 5701 register-write
 * workaround, issues GRC_MISC_CFG_CORECLK_RESET (with PCI-Express and
 * 5705/5750 special cases), re-enables indirect accesses, restores
 * PCI config state, clears PCI-X relaxed ordering, re-enables the
 * memory arbiter, restores GRC/MAC modes, waits for the on-chip
 * firmware to signal completion, and re-probes the ASF configuration
 * from NVRAM shadow memory.  Returns 0 on success (error paths are on
 * lines elided from this sampled excerpt). */
3599 static int tg3_chip_reset(struct tg3 *tp)
3605 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704))
3609 * We must avoid the readl() that normally takes place.
3610 * It locks machines, causes machine checks, and other
3611 * fun things. So, temporarily disable the 5701
3612 * hardware workaround, while we do the reset.
3614 flags_save = tp->tg3_flags;
3615 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3618 val = GRC_MISC_CFG_CORECLK_RESET;
3620 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3621 if (tr32(0x7e2c) == 0x60) {
3624 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3625 tw32(GRC_MISC_CFG, (1 << 29));
/* 5705/5750: keep the GPHY powered across the core reset. */
3630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3632 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3633 tw32(GRC_MISC_CFG, val);
3635 /* restore 5701 hardware bug workaround flag */
3636 tp->tg3_flags = flags_save;
3638 /* Unfortunately, we have to delay before the PCI read back.
3639 * Some 575X chips even will not respond to a PCI cfg access
3640 * when the reset command is given to the chip.
3642 * How do these hardware designers expect things to work
3643 * properly if the PCI write is posted for a long period
3644 * of time? It is always necessary to have some method by
3645 * which a register read back can occur to push the write
3646 * out which does the reset.
3648 * For most tg3 variants the trick below was working.
3653 /* Flush PCI posted writes. The normal MMIO registers
3654 * are inaccessible at this time so this is the only
3655 * way to make this reliably (actually, this is no longer
3656 * the case, see above). I tried to use indirect
3657 * register read/write but this upset some 5701 variants.
3659 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3663 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3664 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3668 /* Wait for link training to complete. */
3669 for (i = 0; i < 5000; i++)
3672 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3673 pci_write_config_dword(tp->pdev, 0xc4,
3674 cfg_val | (1 << 15));
3676 /* Set PCIE max payload size and clear error status. */
3677 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3680 /* Re-enable indirect register accesses. */
3681 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3682 tp->misc_host_ctrl);
3684 /* Set MAX PCI retry to zero. */
3685 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3686 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3687 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3688 val |= PCISTATE_RETRY_SAME_DMA;
3689 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3691 pci_restore_state(tp->pdev, tp->pci_cfg_state);
3693 /* Make sure PCI-X relaxed ordering bit is clear. */
3694 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3695 val &= ~PCIX_CAPS_RELAXED_ORDERING;
3696 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3698 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3700 tw32(GRC_MODE, tp->grc_mode);
3702 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3703 u32 val = tr32(0xc4);
3705 tw32(0xc4, val | (1 << 15));
3708 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3709 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3710 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3711 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3712 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3713 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3716 if (tp->phy_id == PHY_ID_SERDES) {
3717 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3718 tw32_f(MAC_MODE, tp->mac_mode);
3720 tw32_f(MAC_MODE, 0);
3723 /* Wait for firmware initialization to complete. */
3724 for (i = 0; i < 100000; i++) {
3725 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
/* Firmware writes back the one's complement of the magic when done. */
3726 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3731 !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3732 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3733 "firmware will not restart magic=%08x\n",
3734 tp->dev->name, val);
3738 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3739 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3740 u32 val = tr32(0x7c00);
3742 tw32(0x7c00, val | (1 << 25));
3745 /* Reprobe ASF enable state. */
3746 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3747 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3748 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3749 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3752 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3753 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3754 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3756 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3763 /* tp->lock is held. */
/* When ASF firmware is running, send it the PAUSE command through the
 * firmware command mailbox, raise the RX-CPU event bit, and poll up to
 * 100 iterations for the CPU to acknowledge (clear bit 14). */
3764 static void tg3_stop_fw(struct tg3 *tp)
3766 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3770 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3771 val = tr32(GRC_RX_CPU_EVENT);
3773 tw32(GRC_RX_CPU_EVENT, val);
3775 /* Wait for RX cpu to ACK the event. */
3776 for (i = 0; i < 100; i++) {
3777 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3784 /* tp->lock is held. */
/* Bring the chip fully down: signal SHUTDOWN to firmware before the
 * reset, perform the chip reset, then post the legacy and post-reset
 * SHUTDOWN signatures.  Returns the tg3_chip_reset() result (the
 * surrounding abort/stop-fw calls are elided in this excerpt). */
3785 static int tg3_halt(struct tg3 *tp)
3791 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN)
3794 err = tg3_chip_reset(tp);
3796 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3797 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3805 #define TG3_FW_RELEASE_MAJOR 0x0
3806 #define TG3_FW_RELASE_MINOR 0x0
3807 #define TG3_FW_RELEASE_FIX 0x0
3808 #define TG3_FW_START_ADDR 0x08000000
3809 #define TG3_FW_TEXT_ADDR 0x08000000
3810 #define TG3_FW_TEXT_LEN 0x9c0
3811 #define TG3_FW_RODATA_ADDR 0x080009c0
3812 #define TG3_FW_RODATA_LEN 0x60
3813 #define TG3_FW_DATA_ADDR 0x08000a40
3814 #define TG3_FW_DATA_LEN 0x20
3815 #define TG3_FW_SBSS_ADDR 0x08000a60
3816 #define TG3_FW_SBSS_LEN 0xc
3817 #define TG3_FW_BSS_ADDR 0x08000a70
3818 #define TG3_FW_BSS_LEN 0x10
/* Opaque RX-CPU firmware image (MIPS machine code) loaded into NIC
 * SRAM at TG3_FW_TEXT_ADDR by the driver's firmware-load path.  The
 * words are raw data supplied by Broadcom — do not edit by hand. */
3820 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3821 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3822 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3823 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3824 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3825 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3826 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3827 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3828 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3829 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3830 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3831 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3832 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3833 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3834 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3835 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3836 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3837 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3838 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3839 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3840 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3841 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3842 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3843 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3844 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3845 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3847 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3848 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3849 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3850 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3851 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3852 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3853 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3854 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3855 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3856 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3857 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3858 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3859 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3860 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3861 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3862 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3863 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3864 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3865 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3866 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3867 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3868 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3869 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3870 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3871 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3872 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3873 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3874 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3875 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3876 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3877 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3878 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3879 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3880 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3881 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3882 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3883 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3884 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3885 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3886 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3887 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3888 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3889 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3890 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3891 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3892 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3893 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3894 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3895 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3896 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3897 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3898 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3899 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3900 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3901 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3902 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3903 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3904 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3905 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3906 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3907 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3908 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3909 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
3910 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
3911 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
3914 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
3915 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
3916 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
3917 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
3918 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
3922 #if 0 /* All zeros, don't eat up space with it. */
3923 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
3924 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3925 0x00000000, 0x00000000, 0x00000000, 0x00000000
3929 #define RX_CPU_SCRATCH_BASE 0x30000
3930 #define RX_CPU_SCRATCH_SIZE 0x04000
3931 #define TX_CPU_SCRATCH_BASE 0x34000
3932 #define TX_CPU_SCRATCH_SIZE 0x04000
3934 /* tp->lock is held. */
3935 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3939 if (offset == TX_CPU_BASE &&
3940 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3943 if (offset == RX_CPU_BASE) {
3944 for (i = 0; i < 10000; i++) {
3945 tw32(offset + CPU_STATE, 0xffffffff);
3946 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3947 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3951 tw32(offset + CPU_STATE, 0xffffffff);
3952 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3955 for (i = 0; i < 10000; i++) {
3956 tw32(offset + CPU_STATE, 0xffffffff);
3957 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3958 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3964 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
3967 (offset == RX_CPU_BASE ? "RX" : "TX"));
3974 unsigned int text_base;
3975 unsigned int text_len;
3977 unsigned int rodata_base;
3978 unsigned int rodata_len;
3980 unsigned int data_base;
3981 unsigned int data_len;
3985 /* tp->lock is held. */
3986 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
3987 int cpu_scratch_size, struct fw_info *info)
3990 u32 orig_tg3_flags = tp->tg3_flags;
3991 void (*write_op)(struct tg3 *, u32, u32);
3993 if (cpu_base == TX_CPU_BASE &&
3994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3995 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
3996 "TX cpu firmware on %s which is 5705.\n",
4001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4002 write_op = tg3_write_mem;
4004 write_op = tg3_write_indirect_reg32;
4006 /* Force use of PCI config space for indirect register
4009 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4011 err = tg3_halt_cpu(tp, cpu_base);
4015 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4016 write_op(tp, cpu_scratch_base + i, 0);
4017 tw32(cpu_base + CPU_STATE, 0xffffffff);
4018 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4019 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4020 write_op(tp, (cpu_scratch_base +
4021 (info->text_base & 0xffff) +
4024 info->text_data[i] : 0));
4025 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4026 write_op(tp, (cpu_scratch_base +
4027 (info->rodata_base & 0xffff) +
4029 (info->rodata_data ?
4030 info->rodata_data[i] : 0));
4031 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4032 write_op(tp, (cpu_scratch_base +
4033 (info->data_base & 0xffff) +
4036 info->data_data[i] : 0));
4041 tp->tg3_flags = orig_tg3_flags;
4045 /* tp->lock is held. */
4046 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4048 struct fw_info info;
4051 info.text_base = TG3_FW_TEXT_ADDR;
4052 info.text_len = TG3_FW_TEXT_LEN;
4053 info.text_data = &tg3FwText[0];
4054 info.rodata_base = TG3_FW_RODATA_ADDR;
4055 info.rodata_len = TG3_FW_RODATA_LEN;
4056 info.rodata_data = &tg3FwRodata[0];
4057 info.data_base = TG3_FW_DATA_ADDR;
4058 info.data_len = TG3_FW_DATA_LEN;
4059 info.data_data = NULL;
4061 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4062 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4067 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4068 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4073 /* Now startup only the RX cpu. */
4074 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4075 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4077 for (i = 0; i < 5; i++) {
4078 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4080 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4081 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4082 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4086 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4087 "to set RX CPU PC, is %08x should be %08x\n",
4088 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4092 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4093 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4098 #if TG3_TSO_SUPPORT != 0
4100 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4101 #define TG3_TSO_FW_RELASE_MINOR 0x6
4102 #define TG3_TSO_FW_RELEASE_FIX 0x0
4103 #define TG3_TSO_FW_START_ADDR 0x08000000
4104 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4105 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4106 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4107 #define TG3_TSO_FW_RODATA_LEN 0x60
4108 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4109 #define TG3_TSO_FW_DATA_LEN 0x30
4110 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4111 #define TG3_TSO_FW_SBSS_LEN 0x2c
4112 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4113 #define TG3_TSO_FW_BSS_LEN 0x894
4115 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4116 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4117 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4118 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4119 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4120 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4121 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4122 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4123 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4124 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4125 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4126 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4127 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4128 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4129 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4130 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4131 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4132 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4133 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4134 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4135 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4136 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4137 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4138 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4139 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4140 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4141 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4142 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4143 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4144 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4145 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4146 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4147 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4148 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4149 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4150 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4151 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4152 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4153 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4154 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4155 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4156 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4157 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4158 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4159 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4160 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4161 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4162 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4163 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4164 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4165 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4166 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4167 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4168 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4169 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4170 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4171 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4172 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4173 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4174 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4175 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4176 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4177 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4178 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4179 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4180 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4181 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4182 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4183 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4184 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4185 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4186 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4187 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4188 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4189 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4190 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4191 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4192 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4193 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4194 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4195 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4196 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4197 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4198 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4199 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4200 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4201 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4202 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4203 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4204 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4205 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4206 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4207 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4208 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4209 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4210 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4211 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4212 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4213 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4214 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4215 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4216 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4217 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4218 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4219 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4220 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4221 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4222 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4223 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4224 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4225 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4226 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4227 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4228 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4229 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4230 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4231 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4232 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4233 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4234 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4235 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4236 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4237 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4238 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4239 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4240 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4241 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4242 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4243 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4244 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4245 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4246 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4247 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4248 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4249 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4250 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4251 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4252 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4253 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4254 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4255 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4256 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4257 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4258 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4259 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4260 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4261 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4262 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4263 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4264 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4265 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4266 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4267 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4268 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4269 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4270 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4271 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4272 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4273 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4274 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4275 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4276 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4277 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4278 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4279 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4280 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4281 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4282 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4283 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4284 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4285 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4286 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4287 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4288 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4289 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4290 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4291 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4292 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4293 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4294 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4295 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4296 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4297 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4298 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4299 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4300 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4301 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4302 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4303 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4304 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4305 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4306 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4307 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4308 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4309 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4310 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4311 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4312 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4313 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4314 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4315 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4316 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4317 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4318 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4319 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4320 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4321 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4322 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4323 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4324 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4325 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4326 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4327 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4328 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4329 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4330 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4331 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4332 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4333 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4334 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4335 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4336 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4337 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4338 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4339 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4340 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4341 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4342 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4343 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4344 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4345 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4346 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4347 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4348 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4349 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4350 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4351 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4352 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4353 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4354 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4355 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4356 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4357 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4358 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4359 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4360 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4361 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4362 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4363 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4364 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4365 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4366 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4367 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4368 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4369 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4370 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4371 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4372 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4373 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4374 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4375 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4376 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4377 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4378 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4379 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4380 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4381 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4382 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4383 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4384 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4385 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4386 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4387 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4388 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4389 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4390 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4391 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4392 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4393 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4394 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4395 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4396 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4397 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4398 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4399 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4402 u32 tg3TsoFwRodata[] = {
4403 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4404 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4405 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4406 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4410 u32 tg3TsoFwData[] = {
4411 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4412 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4416 /* 5705 needs a special version of the TSO firmware. */
4417 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
4418 #define TG3_TSO5_FW_RELASE_MINOR 0x2
4419 #define TG3_TSO5_FW_RELEASE_FIX 0x0
4420 #define TG3_TSO5_FW_START_ADDR 0x00010000
4421 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
4422 #define TG3_TSO5_FW_TEXT_LEN 0xe90
4423 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
4424 #define TG3_TSO5_FW_RODATA_LEN 0x50
4425 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
4426 #define TG3_TSO5_FW_DATA_LEN 0x20
4427 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
4428 #define TG3_TSO5_FW_SBSS_LEN 0x28
4429 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
4430 #define TG3_TSO5_FW_BSS_LEN 0x88
4432 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4433 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4434 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4435 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4436 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4437 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4438 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4439 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4440 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4441 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4442 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4443 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4444 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4445 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4446 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4447 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4448 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4449 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4450 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4451 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4452 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4453 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4454 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4455 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4456 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4457 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4458 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4459 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4460 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4461 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4462 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4463 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4464 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4465 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4466 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4467 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4468 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4469 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4470 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4471 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4472 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4473 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4474 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4475 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4476 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4477 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4478 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4479 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4480 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4481 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4482 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4483 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4484 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4485 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4486 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4487 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4488 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4489 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4490 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4491 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4492 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4493 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4494 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4495 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4496 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4497 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4498 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4499 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4500 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4501 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4502 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4503 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4504 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4505 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4506 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4507 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4508 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4509 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4510 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4511 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4512 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4513 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4514 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4515 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4516 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4517 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4518 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4519 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4520 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4521 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4522 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4523 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4524 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4525 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4526 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4527 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4528 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4529 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4530 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4531 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4532 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4533 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4534 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4535 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4536 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4537 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4538 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4539 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4540 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4541 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4542 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4543 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4544 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4545 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4546 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4547 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4548 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4549 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4550 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4551 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4552 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4553 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4554 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4555 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4556 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4557 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4558 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4559 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4560 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4561 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4562 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4563 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4564 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4565 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4566 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4567 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4568 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4569 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4570 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4571 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4572 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4573 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4574 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4575 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4576 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4577 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4578 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4579 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4580 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4581 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4582 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4583 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4584 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4585 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4586 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4587 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4588 0x00000000, 0x00000000, 0x00000000,
/* Read-only data segment of the TSO firmware image for 5705-class chips.
 * Consumed by tg3_load_tso_firmware() (info.rodata_data) and copied into
 * NIC scratch SRAM.  The hex words are ASCII text, e.g. 0x4d61696e
 * 0x43707542 == "MainCpuB", 0x73746b6f 0x66666c64 == "stkoffld". */
4591 u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4592 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4593 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4594 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4595 0x00000000, 0x00000000, 0x00000000,
/* Initialized data segment of the 5705 TSO firmware (info.data_data in
 * tg3_load_tso_firmware()).  Contains the ASCII version tag
 * "stkoffld_v1.2.0". */
4598 u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4599 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4600 0x00000000, 0x00000000, 0x00000000,
4603 /* tp->lock is held. */
/*
 * tg3_load_tso_firmware - download the TCP-segmentation-offload firmware
 * into the NIC's on-chip CPU and start it running.
 *
 * 5705-class chips use the TSO5 image on the RX CPU, with scratch space
 * carved out of the mbuf-pool SRAM (text + SBSS + BSS); other chips use
 * the full TSO image on the TX CPU's dedicated scratch area.  The early
 * ASIC_REV_5750 check bypasses the download path (its action line is
 * elided in this excerpt — presumably an early return; confirm against
 * the full source).
 *
 * Returns 0 on success, or a negative error from tg3_load_firmware_cpu()
 * or when the CPU's PC cannot be pointed at the image entry address.
 *
 * NOTE(review): several original lines (braces, declarations, error
 * paths) are elided from this excerpt, so the visible flow is partial.
 */
4604 static int tg3_load_tso_firmware(struct tg3 *tp)
4606 struct fw_info info;
4607 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4614 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4615 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4616 info.text_data = &tg3Tso5FwText[0];
4617 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4618 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4619 info.rodata_data = &tg3Tso5FwRodata[0];
4620 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4621 info.data_len = TG3_TSO5_FW_DATA_LEN;
4622 info.data_data = &tg3Tso5FwData[0];
4623 cpu_base = RX_CPU_BASE;
4624 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4625 cpu_scratch_size = (info.text_len +
4628 TG3_TSO5_FW_SBSS_LEN +
4629 TG3_TSO5_FW_BSS_LEN);
4631 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4632 info.text_len = TG3_TSO_FW_TEXT_LEN;
4633 info.text_data = &tg3TsoFwText[0];
4634 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4635 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4636 info.rodata_data = &tg3TsoFwRodata[0];
4637 info.data_base = TG3_TSO_FW_DATA_ADDR;
4638 info.data_len = TG3_TSO_FW_DATA_LEN;
4639 info.data_data = &tg3TsoFwData[0];
4640 cpu_base = TX_CPU_BASE;
4641 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4642 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4645 err = tg3_load_firmware_cpu(tp, cpu_base,
4646 cpu_scratch_base, cpu_scratch_size,
4651 /* Now startup the cpu. */
4652 tw32(cpu_base + CPU_STATE, 0xffffffff);
4653 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Retry up to 5 times: re-halt the CPU and rewrite the PC until the
 * readback confirms it latched the firmware entry address. */
4655 for (i = 0; i < 5; i++) {
4656 if (tr32(cpu_base + CPU_PC) == info.text_base)
4658 tw32(cpu_base + CPU_STATE, 0xffffffff);
4659 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
4660 tw32_f(cpu_base + CPU_PC, info.text_base);
4664 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
4665 "to set CPU PC, is %08x should be %08x\n",
4666 tp->dev->name, tr32(cpu_base + CPU_PC),
/* PC is set: clear the state word and write 0 to CPU_MODE (clearing
 * CPU_MODE_HALT) so the CPU starts executing the firmware. */
4670 tw32(cpu_base + CPU_STATE, 0xffffffff);
4671 tw32_f(cpu_base + CPU_MODE, 0x00000000);
4675 #endif /* TG3_TSO_SUPPORT != 0 */
4677 /* tp->lock is held. */
/*
 * __tg3_set_mac_addr - program the station MAC address into the MAC
 * address registers and derive the TX backoff seed from it.
 *
 * dev_addr[0..1] form the high half and dev_addr[2..5] the low half of
 * each register pair.  The same address is written to all four
 * MAC_ADDR_{0..3} slots, and on chips other than 5700/5701/5705 also to
 * the 12 extended address slots.  Finally the byte-sum of the address,
 * masked with TX_BACKOFF_SEED_MASK, is written to MAC_TX_BACKOFF_SEED.
 */
4678 static void __tg3_set_mac_addr(struct tg3 *tp)
4680 u32 addr_high, addr_low;
4683 addr_high = ((tp->dev->dev_addr[0] << 8) |
4684 tp->dev->dev_addr[1]);
4685 addr_low = ((tp->dev->dev_addr[2] << 24) |
4686 (tp->dev->dev_addr[3] << 16) |
4687 (tp->dev->dev_addr[4] << 8) |
4688 (tp->dev->dev_addr[5] << 0));
/* Duplicate the address into all four primary MAC address slots
 * (each slot is an 8-byte high/low register pair). */
4689 for (i = 0; i < 4; i++) {
4690 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4691 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4694 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4696 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4697 for (i = 0; i < 12; i++) {
4698 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4699 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Reuse addr_high as the backoff seed: sum of all six address bytes. */
4703 addr_high = (tp->dev->dev_addr[0] +
4704 tp->dev->dev_addr[1] +
4705 tp->dev->dev_addr[2] +
4706 tp->dev->dev_addr[3] +
4707 tp->dev->dev_addr[4] +
4708 tp->dev->dev_addr[5]) &
4709 TX_BACKOFF_SEED_MASK;
4710 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/*
 * tg3_set_mac_addr - net_device set-MAC-address handler.
 *
 * Copies the new address from the sockaddr into dev->dev_addr, then
 * reprograms the hardware registers via __tg3_set_mac_addr() under
 * tp->lock (IRQ-disabling spinlock, matching the driver's other users
 * of that lock).  @p is the struct sockaddr passed by the core.
 */
4713 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4715 struct tg3 *tp = netdev_priv(dev);
4716 struct sockaddr *addr = p;
4718 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4720 spin_lock_irq(&tp->lock);
4721 __tg3_set_mac_addr(tp);
4722 spin_unlock_irq(&tp->lock);
4727 /* tp->lock is held. */
/*
 * tg3_set_bdinfo - write a TG3_BDINFO descriptor-ring control block into
 * NIC SRAM at @bdinfo_addr: the 64-bit host DMA address of the ring
 * (high word then low word), the maxlen/flags word, and — on non-5705
 * chips — the ring's NIC-local SRAM address.
 *
 * NOTE(review): the tg3_write_mem() call heads are elided from this
 * excerpt; only their argument lines are visible.
 */
4728 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4729 dma_addr_t mapping, u32 maxlen_flags,
4733 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4734 ((u64) mapping >> 32));
4736 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4737 ((u64) mapping & 0xffffffff));
4739 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4742 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4744 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4748 static void __tg3_set_rx_mode(struct net_device *);
4750 /* tp->lock is held. */
/*
 * tg3_reset_hw - full chip (re)initialization.
 *
 * Resets the core (tg3_chip_reset), then reprograms DMA read/write
 * engines, the buffer manager and mbuf pools, RX/TX descriptor rings,
 * the host coalescing engine, MAC mode/address, PHY/SERDES setup and
 * the receive rules back to an operational configuration.  Returns 0 on
 * success or a negative error from one of the sub-steps (abort/reset,
 * firmware load, PHY setup).
 *
 * FIX(review): the WDMAC_MODE TSO workaround below tested
 * TG3_FLG2_TSO_CAPABLE against tp->tg3_flags, but that bit belongs to
 * tp->tg3_flags2 (as every other test of it in this function — see the
 * RDMAC_MODE block above it).  Changed to tp->tg3_flags2 so the
 * 5705_A1/A2 TSO branch is evaluated against the correct flag word.
 */
4751 static int tg3_reset_hw(struct tg3 *tp)
4753 u32 val, rdmac_mode;
4756 tg3_disable_ints(tp);
4760 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4762 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4763 err = tg3_abort_hw(tp);
4768 err = tg3_chip_reset(tp);
4772 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4774 /* This works around an issue with Athlon chipsets on
4775 * B3 tigon3 silicon. This bit has no effect on any
4776 * other revision. But do not set this on PCI Express
4779 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4780 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4781 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4783 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4784 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4785 val = tr32(TG3PCI_PCISTATE);
4786 val |= PCISTATE_RETRY_SAME_DMA;
4787 tw32(TG3PCI_PCISTATE, val);
4790 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4791 /* Enable some hw fixes. */
4792 val = tr32(TG3PCI_MSI_DATA);
4793 val |= (1 << 26) | (1 << 28) | (1 << 29);
4794 tw32(TG3PCI_MSI_DATA, val);
4797 /* Descriptor ring init may make accesses to the
4798 * NIC SRAM area to setup the TX descriptors, so we
4799 * can only do this after the hardware has been
4800 * successfully reset.
4804 /* This value is determined during the probe time DMA
4805 * engine test, tg3_test_dma.
4807 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4809 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4810 GRC_MODE_4X_NIC_SEND_RINGS |
4811 GRC_MODE_NO_TX_PHDR_CSUM |
4812 GRC_MODE_NO_RX_PHDR_CSUM);
4813 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
4814 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4816 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
4817 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4818 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4819 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4820 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4824 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4826 /* Setup the timer prescalar register. Clock is always 66Mhz. */
4827 val = tr32(GRC_MISC_CFG);
4829 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4830 tw32(GRC_MISC_CFG, val);
4832 /* Initialize MBUF/DESC pool. */
4833 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4835 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4836 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4838 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4840 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4841 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4842 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4844 #if TG3_TSO_SUPPORT != 0
4845 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
/* 5705 with TSO: the firmware image lives at the bottom of the mbuf
 * pool SRAM, so shrink the pool by the 128-byte-aligned image size. */
4848 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4849 TG3_TSO5_FW_RODATA_LEN +
4850 TG3_TSO5_FW_DATA_LEN +
4851 TG3_TSO5_FW_SBSS_LEN +
4852 TG3_TSO5_FW_BSS_LEN);
4853 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4854 tw32(BUFMGR_MB_POOL_ADDR,
4855 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4856 tw32(BUFMGR_MB_POOL_SIZE,
4857 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4861 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4862 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4863 tp->bufmgr_config.mbuf_read_dma_low_water);
4864 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4865 tp->bufmgr_config.mbuf_mac_rx_low_water);
4866 tw32(BUFMGR_MB_HIGH_WATER,
4867 tp->bufmgr_config.mbuf_high_water);
4869 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4870 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4871 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4872 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4873 tw32(BUFMGR_MB_HIGH_WATER,
4874 tp->bufmgr_config.mbuf_high_water_jumbo);
4876 tw32(BUFMGR_DMA_LOW_WATER,
4877 tp->bufmgr_config.dma_low_water);
4878 tw32(BUFMGR_DMA_HIGH_WATER,
4879 tp->bufmgr_config.dma_high_water);
4881 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4882 for (i = 0; i < 2000; i++) {
4883 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4888 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4893 /* Setup replenish threshold. */
4894 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4896 /* Initialize TG3_BDINFO's at:
4897 * RCVDBDI_STD_BD: standard eth size rx ring
4898 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
4899 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
4902 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
4903 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
4904 * ring attribute flags
4905 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
4907 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
4908 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
4910 * The size of each ring is fixed in the firmware, but the location is
4913 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4914 ((u64) tp->rx_std_mapping >> 32));
4915 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4916 ((u64) tp->rx_std_mapping & 0xffffffff));
4917 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
4918 NIC_SRAM_RX_BUFFER_DESC);
4920 /* Don't even try to program the JUMBO/MINI buffer descriptor
4923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
4924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4925 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4926 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
4928 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4929 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4931 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
4932 BDINFO_FLAGS_DISABLED);
4934 /* Setup replenish threshold. */
4935 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
4937 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
4938 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4939 ((u64) tp->rx_jumbo_mapping >> 32));
4940 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4941 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
4942 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4943 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4944 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
4945 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
4947 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4948 BDINFO_FLAGS_DISABLED);
4953 /* There is only one send ring on 5705/5750, no need to explicitly
4954 * disable the others.
4956 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
4957 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
4958 /* Clear out send RCB ring in SRAM. */
4959 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
4960 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4961 BDINFO_FLAGS_DISABLED);
4966 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4967 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4969 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
4970 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4971 tp->tx_desc_mapping,
4972 (TG3_TX_RING_SIZE <<
4973 BDINFO_FLAGS_MAXLEN_SHIFT),
4974 NIC_SRAM_TX_BUFFER_DESC);
4976 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4978 BDINFO_FLAGS_DISABLED,
4979 NIC_SRAM_TX_BUFFER_DESC);
4982 /* There is only one receive return ring on 5705/5750, no need
4983 * to explicitly disable the others.
4985 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
4986 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
4987 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
4988 i += TG3_BDINFO_SIZE) {
4989 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4990 BDINFO_FLAGS_DISABLED);
4995 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
4997 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
4999 (TG3_RX_RCB_RING_SIZE(tp) <<
5000 BDINFO_FLAGS_MAXLEN_SHIFT),
5003 tp->rx_std_ptr = tp->rx_pending;
5004 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5007 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5008 tp->rx_jumbo_pending : 0;
5009 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5012 /* Initialize MAC address and backoff seed. */
5013 __tg3_set_mac_addr(tp);
5015 /* MTU + ethernet header + FCS + optional VLAN tag */
5016 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5018 /* The slot time is changed by tg3_setup_phy if we
5019 * run at gigabit with half duplex.
5021 tw32(MAC_TX_LENGTHS,
5022 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5023 (6 << TX_LENGTHS_IPG_SHIFT) |
5024 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5026 /* Receive rules. */
5027 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5028 tw32(RCVLPC_CONFIG, 0x0181);
5030 /* Calculate RDMAC_MODE setting early, we need it to determine
5031 * the RCVLPC_STATE_ENABLE mask.
5033 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5034 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5035 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5036 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5037 RDMAC_MODE_LNGREAD_ENAB);
5038 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5039 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5040 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5041 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5042 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5043 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5044 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5045 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5046 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5047 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5048 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5049 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5053 #if TG3_TSO_SUPPORT != 0
5054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5055 rdmac_mode |= (1 << 27);
5058 /* Receive/send statistics. */
5059 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5060 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5061 val = tr32(RCVLPC_STATS_ENABLE);
5062 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5063 tw32(RCVLPC_STATS_ENABLE, val);
5065 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5067 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5068 tw32(SNDDATAI_STATSENAB, 0xffffff);
5069 tw32(SNDDATAI_STATSCTRL,
5070 (SNDDATAI_SCTRL_ENABLE |
5071 SNDDATAI_SCTRL_FASTUPD));
5073 /* Setup host coalescing engine. */
5074 tw32(HOSTCC_MODE, 0);
5075 for (i = 0; i < 2000; i++) {
5076 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5081 tw32(HOSTCC_RXCOL_TICKS, 0);
5082 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5083 tw32(HOSTCC_RXMAX_FRAMES, 1);
5084 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5085 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5086 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5087 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5088 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5090 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5091 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5093 /* set status block DMA address */
5094 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5095 ((u64) tp->status_mapping >> 32));
5096 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5097 ((u64) tp->status_mapping & 0xffffffff));
5099 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5100 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5101 /* Status/statistics block address. See tg3_timer,
5102 * the tg3_periodic_fetch_stats call there, and
5103 * tg3_get_stats to see how this works for 5705/5750 chips.
5105 tw32(HOSTCC_STAT_COAL_TICKS,
5106 DEFAULT_STAT_COAL_TICKS);
5107 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5108 ((u64) tp->stats_mapping >> 32));
5109 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5110 ((u64) tp->stats_mapping & 0xffffffff));
5111 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5112 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5115 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5117 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5118 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5119 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5120 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5121 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5123 /* Clear statistics/status block in chip, and status block in ram. */
5124 for (i = NIC_SRAM_STATS_BLK;
5125 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5127 tg3_write_mem(tp, i, 0);
5130 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5132 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5133 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5134 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5137 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5139 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5140 GRC_LCLCTRL_GPIO_OUTPUT1);
5141 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5144 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5145 tr32(MAILBOX_INTERRUPT_0);
5147 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5148 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5149 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5153 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5154 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5155 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5156 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5157 WDMAC_MODE_LNGREAD_ENAB);
5159 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5160 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
/* FIX: was tp->tg3_flags — TG3_FLG2_* bits live in tg3_flags2 (see the
 * matching RDMAC_MODE test above), so this branch could never key off
 * the intended TSO-capable bit. */
5162 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5163 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5164 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5166 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5167 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5168 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5169 val |= WDMAC_MODE_RX_ACCEL;
5173 tw32_f(WDMAC_MODE, val);
5176 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5177 val = tr32(TG3PCI_X_CAPS);
5178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5179 val &= ~PCIX_CAPS_BURST_MASK;
5180 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5181 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5182 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5183 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5184 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5185 val |= (tp->split_mode_max_reqs <<
5186 PCIX_CAPS_SPLIT_SHIFT);
5188 tw32(TG3PCI_X_CAPS, val);
5191 tw32_f(RDMAC_MODE, rdmac_mode);
5194 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5195 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5196 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5197 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5198 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5199 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5200 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5201 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5202 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5203 #if TG3_TSO_SUPPORT != 0
5204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5205 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5207 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5208 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5210 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5211 err = tg3_load_5701_a0_firmware_fix(tp);
5216 #if TG3_TSO_SUPPORT != 0
5217 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5218 err = tg3_load_tso_firmware(tp);
5224 tp->tx_mode = TX_MODE_ENABLE;
5225 tw32_f(MAC_TX_MODE, tp->tx_mode);
5228 tp->rx_mode = RX_MODE_ENABLE;
5229 tw32_f(MAC_RX_MODE, tp->rx_mode);
5232 if (tp->link_config.phy_is_low_power) {
5233 tp->link_config.phy_is_low_power = 0;
5234 tp->link_config.speed = tp->link_config.orig_speed;
5235 tp->link_config.duplex = tp->link_config.orig_duplex;
5236 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5239 tp->mi_mode = MAC_MI_MODE_BASE;
5240 tw32_f(MAC_MI_MODE, tp->mi_mode);
5243 tw32(MAC_LED_CTRL, tp->led_ctrl);
5245 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5246 if (tp->phy_id == PHY_ID_SERDES) {
5247 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5250 tw32_f(MAC_RX_MODE, tp->rx_mode);
5253 if (tp->phy_id == PHY_ID_SERDES) {
5254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5255 /* Set drive transmission level to 1.2V */
5256 val = tr32(MAC_SERDES_CFG);
5259 tw32(MAC_SERDES_CFG, val);
5261 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5262 tw32(MAC_SERDES_CFG, 0x616000);
5265 /* Prevent chip from dropping frames when flow control
5268 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5271 tp->phy_id == PHY_ID_SERDES) {
5272 /* Enable hardware link auto-negotiation */
5273 u32 digctrl, txctrl;
5275 digctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_CRC16_CLEAR_N |
5276 SG_DIG_LOCAL_DUPLEX_STATUS | SG_DIG_LOCAL_LINK_STATUS |
5277 (2 << SG_DIG_SPEED_STATUS_SHIFT) | SG_DIG_FIBER_MODE |
5280 txctrl = tr32(MAC_SERDES_CFG);
5281 tw32_f(MAC_SERDES_CFG, txctrl | MAC_SERDES_CFG_EDGE_SELECT);
5282 tw32_f(SG_DIG_CTRL, digctrl | SG_DIG_SOFT_RESET);
5285 tw32_f(SG_DIG_CTRL, digctrl);
5287 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5290 err = tg3_setup_phy(tp, 1);
5294 if (tp->phy_id != PHY_ID_SERDES) {
5297 /* Clear CRC stats. */
5298 tg3_readphy(tp, 0x1e, &tmp);
5299 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5300 tg3_readphy(tp, 0x14, &tmp);
5303 __tg3_set_rx_mode(tp->dev);
5305 /* Initialize receive rules. */
5306 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5307 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5308 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5309 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5316 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5320 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5322 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5324 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5326 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5328 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5330 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5332 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5334 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5336 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5338 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5340 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5342 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5344 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5346 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5354 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5356 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5357 tg3_enable_ints(tp);
5362 /* Called at device open time to get the chip ready for
5363 * packet processing. Invoked with tp->lock held.
/* Brings the chip to full power (D0), selects the core clock source,
 * resets the PCI memory-window base register to zero, then performs the
 * full hardware initialization via tg3_reset_hw().  The error-handling
 * tail of this function is not visible in this chunk.
 */
5365 static int tg3_init_hw(struct tg3 *tp)
5369 /* Force the chip into D0. */
5370 err = tg3_set_power_state(tp, 0);
5374 tg3_switch_clocks(tp);
/* Reset the NIC memory-window base to offset 0. */
5376 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5378 err = tg3_reset_hw(tp);
/* Fold the current value of 32-bit hardware counter register REG into the
 * 64-bit software counter PSTAT (a {high,low} u32 pair): add into .low and
 * carry into .high when the addition wraps (detected by .low ending up
 * smaller than the value just added).  The closing `} while (0)` of this
 * macro is not visible in this chunk.
 */
5384 #define TG3_STAT_ADD32(PSTAT, REG) \
5385 do { u32 __val = tr32(REG); \
5386 (PSTAT)->low += __val; \
5387 if ((PSTAT)->low < __val) \
5388 (PSTAT)->high += 1; \
/* Accumulate the MAC TX and RX statistics counters from chip registers
 * into the in-memory tg3_hw_stats block via TG3_STAT_ADD32.  Called from
 * the periodic timer on 5705/5750-class chips.  Does nothing while the
 * link is down.
 */
5391 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5393 struct tg3_hw_stats *sp = tp->hw_stats;
/* Nothing to accumulate without carrier. */
5395 if (!netif_carrier_ok(tp->dev))
5398 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5399 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5400 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5401 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5402 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5403 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5404 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5405 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5406 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5407 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5408 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5409 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5410 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5412 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5413 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5414 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5415 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5416 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5417 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5418 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5419 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5420 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5421 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5422 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5423 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5424 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5425 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Per-device periodic timer (period tp->timer_offset, set to HZ/10 in
 * tg3_open()).  Under tp->lock and tp->tx_lock it:
 *  1) works around the racy non-tagged status-block IRQ protocol by
 *     forcing an interrupt whenever an unserviced status update is seen;
 *  2) schedules reset_task if the write-DMA engine has lost its enable
 *     bit (chip appears hung);
 *  3) fetches MAC stats on 5705/5750-class chips;
 *  4) once per second (timer_multiplier ticks) polls link state for the
 *     chip configs that need it and reruns tg3_setup_phy() on a change;
 *  5) every 120 seconds sends an ASF keep-alive to the management
 *     firmware, if enabled.
 * Finally re-arms itself.
 */
5428 static void tg3_timer(unsigned long __opaque)
5430 struct tg3 *tp = (struct tg3 *) __opaque;
5431 unsigned long flags;
5433 spin_lock_irqsave(&tp->lock, flags);
5434 spin_lock(&tp->tx_lock);
5436 /* All of this garbage is because when using non-tagged
5437 * IRQ status the mailbox/status_block protocol the chip
5438 * uses with the cpu is race prone.
5440 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5441 tw32(GRC_LOCAL_CTRL,
5442 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5444 tw32(HOSTCC_MODE, tp->coalesce_mode |
5445 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write-DMA engine disabled itself: defer a full chip reset to
 * process context (cannot reset from timer/IRQ context). */
5448 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5449 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5450 spin_unlock(&tp->tx_lock);
5451 spin_unlock_irqrestore(&tp->lock, flags);
5452 schedule_work(&tp->reset_task);
5456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5458 tg3_periodic_fetch_stats(tp);
5460 /* This part only runs once per second. */
5461 if (!--tp->timer_counter) {
5462 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5466 mac_stat = tr32(MAC_STATUS);
5469 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5470 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5472 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5476 tg3_setup_phy(tp, 0);
5477 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5478 u32 mac_stat = tr32(MAC_STATUS);
5481 if (netif_carrier_ok(tp->dev) &&
5482 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5485 if (! netif_carrier_ok(tp->dev) &&
5486 (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5492 ~MAC_MODE_PORT_MODE_MASK));
5494 tw32_f(MAC_MODE, tp->mac_mode);
5496 tg3_setup_phy(tp, 0);
5500 tp->timer_counter = tp->timer_multiplier;
5503 /* Heartbeat is only sent once every 120 seconds. */
5504 if (!--tp->asf_counter) {
5505 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5509 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5510 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5511 val = tr32(GRC_RX_CPU_EVENT);
5513 tw32(GRC_RX_CPU_EVENT, val);
5515 tp->asf_counter = tp->asf_multiplier;
/* Re-arm for the next tick. */
5518 spin_unlock(&tp->tx_lock);
5519 spin_unlock_irqrestore(&tp->lock, flags);
5521 tp->timer.expires = jiffies + tp->timer_offset;
5522 add_timer(&tp->timer);
/* net_device ->open hook.  Sequence: quiesce interrupts and clear
 * INIT_COMPLETE under the locks, allocate the DMA-consistent rings and
 * status/stats blocks, install the shared IRQ handler, run the full
 * hardware init, start the periodic timer (HZ/10 period; 1s link poll;
 * 120s ASF heartbeat), mark INIT_COMPLETE, enable interrupts and start
 * the TX queue.  The visible free_irq/tg3_free_consistent pair appears
 * to be the hw-init failure path (labels/returns elided in this chunk).
 */
5525 static int tg3_open(struct net_device *dev)
5527 struct tg3 *tp = netdev_priv(dev);
5530 spin_lock_irq(&tp->lock);
5531 spin_lock(&tp->tx_lock);
/* Quiesce: no interrupts while rings are (re)built. */
5533 tg3_disable_ints(tp);
5534 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5536 spin_unlock(&tp->tx_lock);
5537 spin_unlock_irq(&tp->lock);
5539 /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5540 * tp->tg3_flags is accurate at that new place.
5542 err = tg3_alloc_consistent(tp);
5546 err = request_irq(dev->irq, tg3_interrupt,
5547 SA_SHIRQ, dev->name, dev);
5550 tg3_free_consistent(tp);
5554 spin_lock_irq(&tp->lock);
5555 spin_lock(&tp->tx_lock);
5557 err = tg3_init_hw(tp);
/* Timer cadence: tick every HZ/10; link work every 10 ticks (1s);
 * ASF heartbeat every 10*120 ticks (120s). */
5562 tp->timer_offset = HZ / 10;
5563 tp->timer_counter = tp->timer_multiplier = 10;
5564 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5566 init_timer(&tp->timer);
5567 tp->timer.expires = jiffies + tp->timer_offset;
5568 tp->timer.data = (unsigned long) tp;
5569 tp->timer.function = tg3_timer;
5570 add_timer(&tp->timer);
5572 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5575 spin_unlock(&tp->tx_lock);
5576 spin_unlock_irq(&tp->lock);
/* Error path: release IRQ and consistent memory. */
5579 free_irq(dev->irq, dev);
5580 tg3_free_consistent(tp);
5584 spin_lock_irq(&tp->lock);
5585 spin_lock(&tp->tx_lock);
5587 tg3_enable_ints(tp);
5589 spin_unlock(&tp->tx_lock);
5590 spin_unlock_irq(&tp->lock);
5592 netif_start_queue(dev);
/* Debugging aid: dump PCI status, every MAC/DMA/host-coalescing control
 * block status register, the on-chip RCB and status-block SRAM, the
 * host-memory status and statistics blocks, the send/receive mailbox
 * producer indices, and the first few NIC-side TX/RX buffer descriptors
 * to the kernel log.  Left non-static (note the commented-out
 * qualifier), presumably so it can be invoked ad hoc while debugging.
 */
5598 /*static*/ void tg3_dump_state(struct tg3 *tp)
5600 u32 val32, val32_2, val32_3, val32_4, val32_5;
5604 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5605 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5606 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5610 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5611 tr32(MAC_MODE), tr32(MAC_STATUS));
5612 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5613 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5614 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5615 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5616 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5617 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5619 /* Send data initiator control block */
5620 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5621 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5622 printk(" SNDDATAI_STATSCTRL[%08x]\n",
5623 tr32(SNDDATAI_STATSCTRL));
5625 /* Send data completion control block */
5626 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5628 /* Send BD ring selector block */
5629 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5630 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5632 /* Send BD initiator control block */
5633 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5634 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5636 /* Send BD completion control block */
5637 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5639 /* Receive list placement control block */
5640 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5641 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5642 printk(" RCVLPC_STATSCTRL[%08x]\n",
5643 tr32(RCVLPC_STATSCTRL));
5645 /* Receive data and receive BD initiator control block */
5646 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5647 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5649 /* Receive data completion control block */
5650 printk("DEBUG: RCVDCC_MODE[%08x]\n",
5653 /* Receive BD initiator control block */
5654 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5655 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5657 /* Receive BD completion control block */
5658 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5659 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5661 /* Receive list selector control block */
5662 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5663 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5665 /* Mbuf cluster free block */
5666 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5667 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5669 /* Host coalescing control block */
5670 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5671 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5672 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5673 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5674 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5675 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5676 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5677 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5678 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5679 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5680 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5681 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5683 /* Memory arbiter control block */
5684 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5685 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5687 /* Buffer manager control block */
5688 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5689 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5690 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5691 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5692 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5693 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5694 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5695 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5697 /* Read DMA control block */
5698 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5699 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5701 /* Write DMA control block */
5702 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5703 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5705 /* DMA completion block */
5706 printk("DEBUG: DMAC_MODE[%08x]\n",
5710 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5711 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5712 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5713 tr32(GRC_LOCAL_CTRL));
/* Receive BD ring control blocks (jumbo/standard/mini). */
5716 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5717 tr32(RCVDBDI_JUMBO_BD + 0x0),
5718 tr32(RCVDBDI_JUMBO_BD + 0x4),
5719 tr32(RCVDBDI_JUMBO_BD + 0x8),
5720 tr32(RCVDBDI_JUMBO_BD + 0xc));
5721 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5722 tr32(RCVDBDI_STD_BD + 0x0),
5723 tr32(RCVDBDI_STD_BD + 0x4),
5724 tr32(RCVDBDI_STD_BD + 0x8),
5725 tr32(RCVDBDI_STD_BD + 0xc));
5726 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5727 tr32(RCVDBDI_MINI_BD + 0x0),
5728 tr32(RCVDBDI_MINI_BD + 0x4),
5729 tr32(RCVDBDI_MINI_BD + 0x8),
5730 tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks and status block as stored in NIC SRAM. */
5732 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5733 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5734 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5735 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5736 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5737 val32, val32_2, val32_3, val32_4);
5739 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5740 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5741 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5742 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5743 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5744 val32, val32_2, val32_3, val32_4);
5746 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5747 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5748 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5749 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5750 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5751 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5752 val32, val32_2, val32_3, val32_4, val32_5);
5754 /* SW status block */
5755 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5756 tp->hw_status->status,
5757 tp->hw_status->status_tag,
5758 tp->hw_status->rx_jumbo_consumer,
5759 tp->hw_status->rx_consumer,
5760 tp->hw_status->rx_mini_consumer,
5761 tp->hw_status->idx[0].rx_producer,
5762 tp->hw_status->idx[0].tx_consumer);
5764 /* SW statistics block */
5765 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5766 ((u32 *)tp->hw_stats)[0],
5767 ((u32 *)tp->hw_stats)[1],
5768 ((u32 *)tp->hw_stats)[2],
5769 ((u32 *)tp->hw_stats)[3]);
/* Send mailbox producer indices (host and NIC views). */
5772 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5773 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5774 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5775 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5776 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5778 /* NIC side send descriptors. */
5779 for (i = 0; i < 6; i++) {
5782 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5783 + (i * sizeof(struct tg3_tx_buffer_desc));
5784 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5786 readl(txd + 0x0), readl(txd + 0x4),
5787 readl(txd + 0x8), readl(txd + 0xc));
5790 /* NIC side RX descriptors. */
5791 for (i = 0; i < 6; i++) {
5794 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5795 + (i * sizeof(struct tg3_rx_buffer_desc));
5796 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5798 readl(rxd + 0x0), readl(rxd + 0x4),
5799 readl(rxd + 0x8), readl(rxd + 0xc));
5800 rxd += (4 * sizeof(u32));
5801 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5803 readl(rxd + 0x0), readl(rxd + 0x4),
5804 readl(rxd + 0x8), readl(rxd + 0xc));
/* Same again for the jumbo RX ring descriptors. */
5807 for (i = 0; i < 6; i++) {
5810 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5811 + (i * sizeof(struct tg3_rx_buffer_desc));
5812 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5814 readl(rxd + 0x0), readl(rxd + 0x4),
5815 readl(rxd + 0x8), readl(rxd + 0xc));
5816 rxd += (4 * sizeof(u32));
5817 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5819 readl(rxd + 0x0), readl(rxd + 0x4),
5820 readl(rxd + 0x8), readl(rxd + 0xc));
5825 static struct net_device_stats *tg3_get_stats(struct net_device *);
5826 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device ->stop hook: stop the TX queue, kill the periodic timer,
 * disable chip interrupts under the locks, clear INIT_COMPLETE and the
 * serdes flow-control flag, drop carrier, release the IRQ, snapshot the
 * final hardware counters into net_stats_prev/estats_prev (so totals
 * survive across close/open cycles — see tg3_get_stats()), and free the
 * DMA-consistent memory.
 */
5828 static int tg3_close(struct net_device *dev)
5830 struct tg3 *tp = netdev_priv(dev);
5832 netif_stop_queue(dev);
/* Must complete before tearing down state the timer touches. */
5834 del_timer_sync(&tp->timer);
5836 spin_lock_irq(&tp->lock);
5837 spin_lock(&tp->tx_lock);
5842 tg3_disable_ints(tp);
5847 ~(TG3_FLAG_INIT_COMPLETE |
5848 TG3_FLAG_GOT_SERDES_FLOWCTL);
5849 netif_carrier_off(tp->dev);
5851 spin_unlock(&tp->tx_lock);
5852 spin_unlock_irq(&tp->lock);
5854 free_irq(dev->irq, dev);
/* Preserve cumulative stats before the hw_stats block goes away. */
5856 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5857 sizeof(tp->net_stats_prev));
5858 memcpy(&tp->estats_prev, tg3_get_estats(tp),
5859 sizeof(tp->estats_prev));
5861 tg3_free_consistent(tp);
/* Collapse a tg3_stat64_t ({high,low} u32 pair) into an unsigned long.
 * The visible expression assembles the full 64-bit value; the 32-bit
 * (BITS_PER_LONG == 32) alternative branch is elided from this chunk.
 */
5866 static inline unsigned long get_stat64(tg3_stat64_t *val)
5870 #if (BITS_PER_LONG == 32)
5873 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 with a copper
 * PHY the statistic lives in the PHY: read PHY registers 0x1e/0x14
 * under tp->lock and accumulate into tp->phy_crc_errors in software.
 * All other configurations report the MAC's rx_fcs_errors counter.
 */
5878 static unsigned long calc_crc_errors(struct tg3 *tp)
5880 struct tg3_hw_stats *hw_stats = tp->hw_stats;
5882 if (tp->phy_id != PHY_ID_SERDES &&
5883 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5885 unsigned long flags;
/* Same 0x1e/0x14 access pattern as the CRC-clear in tg3_reset_hw(). */
5888 spin_lock_irqsave(&tp->lock, flags);
5889 tg3_readphy(tp, 0x1e, &val);
5890 tg3_writephy(tp, 0x1e, val | 0x8000);
5891 tg3_readphy(tp, 0x14, &val);
5892 spin_unlock_irqrestore(&tp->lock, flags);
5894 tp->phy_crc_errors += val;
5896 return tp->phy_crc_errors;
5899 return get_stat64(&hw_stats->rx_fcs_errors);
/* Store the cumulative value of ethtool stat `member' into the estats
 * block: the saved pre-close total (old_estats) plus the current
 * hardware counter.  Relies on the estats/old_estats/hw_stats locals
 * declared in tg3_get_estats().
 */
5902 #define ESTAT_ADD(member) \
5903 estats->member = old_estats->member + \
5904 get_stat64(&hw_stats->member)
/* Fill tp->estats with cumulative ethtool statistics: every entry is the
 * total saved at last close (tp->estats_prev, see tg3_close()) plus the
 * live hardware counter, and return a pointer to tp->estats.
 */
5906 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5908 struct tg3_ethtool_stats *estats = &tp->estats;
5909 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5910 struct tg3_hw_stats *hw_stats = tp->hw_stats;
5915 ESTAT_ADD(rx_octets);
5916 ESTAT_ADD(rx_fragments);
5917 ESTAT_ADD(rx_ucast_packets);
5918 ESTAT_ADD(rx_mcast_packets);
5919 ESTAT_ADD(rx_bcast_packets);
5920 ESTAT_ADD(rx_fcs_errors);
5921 ESTAT_ADD(rx_align_errors);
5922 ESTAT_ADD(rx_xon_pause_rcvd);
5923 ESTAT_ADD(rx_xoff_pause_rcvd);
5924 ESTAT_ADD(rx_mac_ctrl_rcvd);
5925 ESTAT_ADD(rx_xoff_entered);
5926 ESTAT_ADD(rx_frame_too_long_errors);
5927 ESTAT_ADD(rx_jabbers);
5928 ESTAT_ADD(rx_undersize_packets);
5929 ESTAT_ADD(rx_in_length_errors);
5930 ESTAT_ADD(rx_out_length_errors);
5931 ESTAT_ADD(rx_64_or_less_octet_packets);
5932 ESTAT_ADD(rx_65_to_127_octet_packets);
5933 ESTAT_ADD(rx_128_to_255_octet_packets);
5934 ESTAT_ADD(rx_256_to_511_octet_packets);
5935 ESTAT_ADD(rx_512_to_1023_octet_packets);
5936 ESTAT_ADD(rx_1024_to_1522_octet_packets);
5937 ESTAT_ADD(rx_1523_to_2047_octet_packets);
5938 ESTAT_ADD(rx_2048_to_4095_octet_packets);
5939 ESTAT_ADD(rx_4096_to_8191_octet_packets);
5940 ESTAT_ADD(rx_8192_to_9022_octet_packets);
5942 ESTAT_ADD(tx_octets);
5943 ESTAT_ADD(tx_collisions);
5944 ESTAT_ADD(tx_xon_sent);
5945 ESTAT_ADD(tx_xoff_sent);
5946 ESTAT_ADD(tx_flow_control);
5947 ESTAT_ADD(tx_mac_errors);
5948 ESTAT_ADD(tx_single_collisions);
5949 ESTAT_ADD(tx_mult_collisions);
5950 ESTAT_ADD(tx_deferred);
5951 ESTAT_ADD(tx_excessive_collisions);
5952 ESTAT_ADD(tx_late_collisions);
5953 ESTAT_ADD(tx_collide_2times);
5954 ESTAT_ADD(tx_collide_3times);
5955 ESTAT_ADD(tx_collide_4times);
5956 ESTAT_ADD(tx_collide_5times);
5957 ESTAT_ADD(tx_collide_6times);
5958 ESTAT_ADD(tx_collide_7times);
5959 ESTAT_ADD(tx_collide_8times);
5960 ESTAT_ADD(tx_collide_9times);
5961 ESTAT_ADD(tx_collide_10times);
5962 ESTAT_ADD(tx_collide_11times);
5963 ESTAT_ADD(tx_collide_12times);
5964 ESTAT_ADD(tx_collide_13times);
5965 ESTAT_ADD(tx_collide_14times);
5966 ESTAT_ADD(tx_collide_15times);
5967 ESTAT_ADD(tx_ucast_packets);
5968 ESTAT_ADD(tx_mcast_packets);
5969 ESTAT_ADD(tx_bcast_packets);
5970 ESTAT_ADD(tx_carrier_sense_errors);
5971 ESTAT_ADD(tx_discards);
5972 ESTAT_ADD(tx_errors);
5974 ESTAT_ADD(dma_writeq_full);
5975 ESTAT_ADD(dma_write_prioq_full);
5976 ESTAT_ADD(rxbds_empty);
5977 ESTAT_ADD(rx_discards);
5978 ESTAT_ADD(rx_errors);
5979 ESTAT_ADD(rx_threshold_hit);
5981 ESTAT_ADD(dma_readq_full);
5982 ESTAT_ADD(dma_read_prioq_full);
5983 ESTAT_ADD(tx_comp_queue_full);
5985 ESTAT_ADD(ring_set_send_prod_index);
5986 ESTAT_ADD(ring_status_update);
5987 ESTAT_ADD(nic_irqs);
5988 ESTAT_ADD(nic_avoided_irqs);
5989 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device ->get_stats hook: synthesize the generic net_device_stats
 * from the tg3 hardware counters, adding each to the totals saved at
 * last close (net_stats_prev).  RX CRC errors come from
 * calc_crc_errors(), which may need to read the PHY.
 */
5994 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
5996 struct tg3 *tp = netdev_priv(dev);
5997 struct net_device_stats *stats = &tp->net_stats;
5998 struct net_device_stats *old_stats = &tp->net_stats_prev;
5999 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Packet totals are the sum of unicast+multicast+broadcast counters. */
6004 stats->rx_packets = old_stats->rx_packets +
6005 get_stat64(&hw_stats->rx_ucast_packets) +
6006 get_stat64(&hw_stats->rx_mcast_packets) +
6007 get_stat64(&hw_stats->rx_bcast_packets);
6009 stats->tx_packets = old_stats->tx_packets +
6010 get_stat64(&hw_stats->tx_ucast_packets) +
6011 get_stat64(&hw_stats->tx_mcast_packets) +
6012 get_stat64(&hw_stats->tx_bcast_packets);
6014 stats->rx_bytes = old_stats->rx_bytes +
6015 get_stat64(&hw_stats->rx_octets);
6016 stats->tx_bytes = old_stats->tx_bytes +
6017 get_stat64(&hw_stats->tx_octets);
6019 stats->rx_errors = old_stats->rx_errors +
6020 get_stat64(&hw_stats->rx_errors) +
6021 get_stat64(&hw_stats->rx_discards);
6022 stats->tx_errors = old_stats->tx_errors +
6023 get_stat64(&hw_stats->tx_errors) +
6024 get_stat64(&hw_stats->tx_mac_errors) +
6025 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6026 get_stat64(&hw_stats->tx_discards);
6028 stats->multicast = old_stats->multicast +
6029 get_stat64(&hw_stats->rx_mcast_packets);
6030 stats->collisions = old_stats->collisions +
6031 get_stat64(&hw_stats->tx_collisions);
6033 stats->rx_length_errors = old_stats->rx_length_errors +
6034 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6035 get_stat64(&hw_stats->rx_undersize_packets);
6037 stats->rx_over_errors = old_stats->rx_over_errors +
6038 get_stat64(&hw_stats->rxbds_empty);
6039 stats->rx_frame_errors = old_stats->rx_frame_errors +
6040 get_stat64(&hw_stats->rx_align_errors);
6041 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6042 get_stat64(&hw_stats->tx_discards);
6043 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6044 get_stat64(&hw_stats->tx_carrier_sense_errors);
6046 stats->rx_crc_errors = old_stats->rx_crc_errors +
6047 calc_crc_errors(tp);
/* Compute a CRC over `buf' — used to hash multicast MAC addresses for
 * the 128-bit MAC hash filter (see __tg3_set_rx_mode()).  Outer loop
 * walks the bytes, inner loop the 8 bits of each byte; the bitwise
 * update itself is elided from this chunk (presumably the standard
 * Ethernet CRC-32 — confirm against the full source).
 */
6052 static inline u32 calc_crc(unsigned char *buf, int len)
6060 for (j = 0; j < len; j++) {
6063 for (k = 0; k < 8; k++) {
6077 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6079 /* accept or reject all multicast frames */
6080 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6081 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6082 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6083 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute the RX filtering configuration from dev->flags and the
 * device multicast list and program it into the MAC: promiscuous mode,
 * accept-all-multicast, reject-all-multicast, or a 128-bit hash filter
 * built from the CRC of each multicast address.  Whether the hardware
 * keeps (rather than strips) VLAN tags depends on the VLAN config and
 * on ASF being enabled (some of the conditions are elided here).
 * Called with tp->lock held (see tg3_set_rx_mode()).
 */
6086 static void __tg3_set_rx_mode(struct net_device *dev)
6088 struct tg3 *tp = netdev_priv(dev);
6091 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6092 RX_MODE_KEEP_VLAN_TAG);
6094 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6097 #if TG3_VLAN_TAG_USED
6099 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6100 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6102 /* By definition, VLAN is disabled always in this
6105 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6106 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6109 if (dev->flags & IFF_PROMISC) {
6110 /* Promiscuous mode. */
6111 rx_mode |= RX_MODE_PROMISC;
6112 } else if (dev->flags & IFF_ALLMULTI) {
6113 /* Accept all multicast. */
6114 tg3_set_multi (tp, 1);
6115 } else if (dev->mc_count < 1) {
6116 /* Reject all multicast. */
6117 tg3_set_multi (tp, 0);
6119 /* Accept one or more multicast(s). */
6120 struct dev_mc_list *mclist;
6122 u32 mc_filter[4] = { 0, };
/* Hash each address into one bit of the 4x32-bit filter:
 * bits 5-6 of the CRC-derived index pick the register,
 * the low 5 bits pick the bit within it. */
6127 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6128 i++, mclist = mclist->next) {
6130 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6132 regidx = (bit & 0x60) >> 5;
6134 mc_filter[regidx] |= (1 << bit);
6137 tw32(MAC_HASH_REG_0, mc_filter[0]);
6138 tw32(MAC_HASH_REG_1, mc_filter[1]);
6139 tw32(MAC_HASH_REG_2, mc_filter[2]);
6140 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the RX mode register if something actually changed. */
6143 if (rx_mode != tp->rx_mode) {
6144 tp->rx_mode = rx_mode;
6145 tw32_f(MAC_RX_MODE, rx_mode);
6150 static void tg3_set_rx_mode(struct net_device *dev)
6152 struct tg3 *tp = netdev_priv(dev);
6154 spin_lock_irq(&tp->lock);
6155 __tg3_set_rx_mode(dev);
6156 spin_unlock_irq(&tp->lock);
/* Size in bytes of the register snapshot produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len hook: report the register-dump buffer size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
/* ethtool ->get_regs hook: snapshot the interesting chip register ranges
 * into the caller's TG3_REGDUMP_LEN buffer at their native offsets; gaps
 * between the dumped ranges stay zero from the initial memset.  Runs
 * under both locks so the dump is internally consistent.  NVRAM
 * registers are included only when the part actually has NVRAM.
 */
6166 static void tg3_get_regs(struct net_device *dev,
6167 struct ethtool_regs *regs, void *_p)
6170 struct tg3 *tp = netdev_priv(dev);
6176 memset(p, 0, TG3_REGDUMP_LEN);
6178 spin_lock_irq(&tp->lock);
6179 spin_lock(&tp->tx_lock);
/* Helpers: copy one register, or a register range, to the matching
 * offset inside the dump buffer. */
6181 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
6182 #define GET_REG32_LOOP(base,len) \
6183 do { p = (u32 *)(orig_p + (base)); \
6184 for (i = 0; i < len; i += 4) \
6185 __GET_REG32((base) + i); \
6187 #define GET_REG32_1(reg) \
6188 do { p = (u32 *)(orig_p + (reg)); \
6189 __GET_REG32((reg)); \
6192 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6193 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6194 GET_REG32_LOOP(MAC_MODE, 0x4f0);
6195 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6196 GET_REG32_1(SNDDATAC_MODE);
6197 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6198 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6199 GET_REG32_1(SNDBDC_MODE);
6200 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6201 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6202 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6203 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6204 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6205 GET_REG32_1(RCVDCC_MODE);
6206 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6207 GET_REG32_LOOP(RCVCC_MODE, 0x14);
6208 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6209 GET_REG32_1(MBFREE_MODE);
6210 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6211 GET_REG32_LOOP(MEMARB_MODE, 0x10);
6212 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6213 GET_REG32_LOOP(RDMAC_MODE, 0x08);
6214 GET_REG32_LOOP(WDMAC_MODE, 0x08);
6215 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6216 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6217 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6218 GET_REG32_LOOP(FTQ_RESET, 0x120);
6219 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6220 GET_REG32_1(DMAC_MODE);
6221 GET_REG32_LOOP(GRC_MODE, 0x4c);
6222 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6223 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6226 #undef GET_REG32_LOOP
6229 spin_unlock(&tp->tx_lock);
6230 spin_unlock_irq(&tp->lock);
6233 static int tg3_get_eeprom_len(struct net_device *dev)
6235 return EEPROM_CHIP_SIZE;
6238 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6239 u32 offset, u32 *val);
/* ethtool ->get_eeprom hook: read eeprom->len bytes starting at
 * eeprom->offset from NVRAM using 4-byte-aligned EEPROM word reads.
 * Unaligned head and tail fragments are handled separately so every
 * device access stays 32-bit aligned; eeprom->len is advanced as bytes
 * are copied out.
 * NOTE(review): uses dev->priv directly while the rest of this file
 * uses netdev_priv(dev) — equivalent in this kernel era, but should be
 * converted for consistency.
 */
6240 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6242 struct tg3 *tp = dev->priv;
6245 u32 i, offset, len, val, b_offset, b_count;
6247 offset = eeprom->offset;
/* Word 0 holds the NVRAM magic; report it byteswapped. */
6251 ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6254 eeprom->magic = swab32(eeprom->magic);
6257 /* adjustments to start on required 4 byte boundary */
6258 b_offset = offset & 3;
6259 b_count = 4 - b_offset;
6260 if (b_count > len) {
6261 /* i.e. offset=1 len=2 */
6264 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6267 memcpy(data, ((char*)&val) + b_offset, b_count);
6270 eeprom->len += b_count;
6273 /* read bytes upto the last 4 byte boundary */
6274 pd = &data[eeprom->len];
6275 for (i = 0; i < (len - (len & 3)); i += 4) {
6276 ret = tg3_nvram_read_using_eeprom(tp, offset + i,
6286 /* read last bytes not ending on 4 byte boundary */
6287 pd = &data[eeprom->len];
6289 b_offset = offset + len - b_count;
6290 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6293 memcpy(pd, ((char*)&val), b_count);
6294 eeprom->len += b_count;
/* ethtool ->get_settings hook: report supported link modes and the
 * current advertising/speed/duplex/autoneg state from tp->link_config.
 * Serdes parts report fibre; copper parts add the 10/100 modes;
 * 10/100-only parts omit gigabit.  Bails out early (error return
 * elided here) while uninitialized or in low-power state.
 */
6299 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6301 struct tg3 *tp = netdev_priv(dev);
6303 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6304 tp->link_config.phy_is_low_power)
6307 cmd->supported = (SUPPORTED_Autoneg);
6309 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6310 cmd->supported |= (SUPPORTED_1000baseT_Half |
6311 SUPPORTED_1000baseT_Full);
6313 if (tp->phy_id != PHY_ID_SERDES)
6314 cmd->supported |= (SUPPORTED_100baseT_Half |
6315 SUPPORTED_100baseT_Full |
6316 SUPPORTED_10baseT_Half |
6317 SUPPORTED_10baseT_Full |
6320 cmd->supported |= SUPPORTED_FIBRE;
6322 cmd->advertising = tp->link_config.advertising;
6323 cmd->speed = tp->link_config.active_speed;
6324 cmd->duplex = tp->link_config.active_duplex;
6326 cmd->phy_address = PHY_ADDR;
6327 cmd->transceiver = 0;
6328 cmd->autoneg = tp->link_config.autoneg;
/* ethtool ->set_settings hook: validate the request (serdes parts only
 * accept the 1000baseT/autoneg advertisement bits), copy the new
 * autoneg/advertising or forced speed/duplex into tp->link_config under
 * both locks, then run tg3_setup_phy() to apply it to the hardware.
 * Rejected (error returns elided here) while uninitialized or in
 * low-power state.
 */
6334 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6336 struct tg3 *tp = netdev_priv(dev);
6338 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6339 tp->link_config.phy_is_low_power)
6342 if (tp->phy_id == PHY_ID_SERDES) {
6343 /* These are the only valid advertisement bits allowed. */
6344 if (cmd->autoneg == AUTONEG_ENABLE &&
6345 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6346 ADVERTISED_1000baseT_Full |
6347 ADVERTISED_Autoneg |
6352 spin_lock_irq(&tp->lock);
6353 spin_lock(&tp->tx_lock);
6355 tp->link_config.autoneg = cmd->autoneg;
/* Autoneg and forced modes are mutually exclusive: invalidate the
 * fields belonging to the mode not in use. */
6356 if (cmd->autoneg == AUTONEG_ENABLE) {
6357 tp->link_config.advertising = cmd->advertising;
6358 tp->link_config.speed = SPEED_INVALID;
6359 tp->link_config.duplex = DUPLEX_INVALID;
6361 tp->link_config.advertising = 0;
6362 tp->link_config.speed = cmd->speed;
6363 tp->link_config.duplex = cmd->duplex;
6366 tg3_setup_phy(tp, 1);
6367 spin_unlock(&tp->tx_lock);
6368 spin_unlock_irq(&tp->lock);
6373 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6375 struct tg3 *tp = netdev_priv(dev);
6377 strcpy(info->driver, DRV_MODULE_NAME);
6378 strcpy(info->version, DRV_MODULE_VERSION);
6379 strcpy(info->bus_info, pci_name(tp->pdev));
6382 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6384 struct tg3 *tp = netdev_priv(dev);
6386 wol->supported = WAKE_MAGIC;
6388 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6389 wol->wolopts = WAKE_MAGIC;
6390 memset(&wol->sopass, 0, sizeof(wol->sopass));
6393 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6395 struct tg3 *tp = netdev_priv(dev);
6397 if (wol->wolopts & ~WAKE_MAGIC)
6399 if ((wol->wolopts & WAKE_MAGIC) &&
6400 tp->phy_id == PHY_ID_SERDES &&
6401 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6404 spin_lock_irq(&tp->lock);
6405 if (wol->wolopts & WAKE_MAGIC)
6406 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6408 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6409 spin_unlock_irq(&tp->lock);
6414 static u32 tg3_get_msglevel(struct net_device *dev)
6416 struct tg3 *tp = netdev_priv(dev);
6417 return tp->msg_enable;
6420 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6422 struct tg3 *tp = netdev_priv(dev);
6423 tp->msg_enable = value;
/* ethtool ->set_tso hook (compiled only when TSO support is built in):
 * parts that are not TSO-capable reject attempts to enable it (the
 * branch body is elided here); capable parts defer to the generic
 * ethtool_op_set_tso().
 */
6426 #if TG3_TSO_SUPPORT != 0
6427 static int tg3_set_tso(struct net_device *dev, u32 value)
6429 struct tg3 *tp = netdev_priv(dev);
6431 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6436 return ethtool_op_set_tso(dev, value);
/* ethtool ->nway_reset hook: restart autonegotiation by setting
 * BMCR_ANRESTART, but only if autoneg is currently enabled in the BMCR.
 * NOTE(review): the BMCR is read twice back-to-back — this looks
 * deliberate (discarding a possibly-stale first PHY read); confirm
 * before "simplifying".  Result-code handling is elided in this chunk.
 */
6440 static int tg3_nway_reset(struct net_device *dev)
6442 struct tg3 *tp = netdev_priv(dev);
6446 spin_lock_irq(&tp->lock);
6447 tg3_readphy(tp, MII_BMCR, &bmcr);
6448 tg3_readphy(tp, MII_BMCR, &bmcr);
6450 if (bmcr & BMCR_ANENABLE) {
6451 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6454 spin_unlock_irq(&tp->lock);
6459 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6461 struct tg3 *tp = netdev_priv(dev);
6463 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6464 ering->rx_mini_max_pending = 0;
6465 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6467 ering->rx_pending = tp->rx_pending;
6468 ering->rx_mini_pending = 0;
6469 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6470 ering->tx_pending = tp->tx_pending;
/* ethtool ->set_ringparam hook: validate the requested sizes against the
 * fixed hardware ring capacities, apply them under the locks (parts
 * limited to 64 RX BDs are clamped to 63), wake the TX queue and restart
 * the interface.  The quiesce/re-init calls between the visible lines
 * are elided in this chunk.
 */
6473 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6475 struct tg3 *tp = netdev_priv(dev);
6477 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6478 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6479 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6483 spin_lock_irq(&tp->lock);
6484 spin_lock(&tp->tx_lock);
6486 tp->rx_pending = ering->rx_pending;
/* Some parts can only post 64 standard RX descriptors. */
6488 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6489 tp->rx_pending > 63)
6490 tp->rx_pending = 63;
6491 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6492 tp->tx_pending = ering->tx_pending;
6496 netif_wake_queue(tp->dev);
6497 spin_unlock(&tp->tx_lock);
6498 spin_unlock_irq(&tp->lock);
6499 tg3_netif_start(tp);
/* ethtool get_pauseparam hook: translate pause-related tg3_flags bits
 * into the ethtool pauseparam structure.
 */
6504 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6506 struct tg3 *tp = netdev_priv(dev);
6508 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6509 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6510 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
/* ethtool set_pauseparam hook: update the pause autoneg/rx/tx flag bits
 * under the lock, then restart the interface to apply them.
 */
6513 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6515 struct tg3 *tp = netdev_priv(dev);
6518 spin_lock_irq(&tp->lock);
6519 spin_lock(&tp->tx_lock);
6520 if (epause->autoneg)
6521 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6523 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6524 if (epause->rx_pause)
6525 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6527 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6528 if (epause->tx_pause)
6529 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6531 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6534 spin_unlock(&tp->tx_lock);
6535 spin_unlock_irq(&tp->lock);
6536 tg3_netif_start(tp);
/* ethtool get_rx_csum hook: report whether RX checksum offload is on. */
6541 static u32 tg3_get_rx_csum(struct net_device *dev)
6543 struct tg3 *tp = netdev_priv(dev);
6544 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum hook: toggle RX checksum offload, rejecting the
 * request on chips with known-broken checksum hardware.
 */
6547 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6549 struct tg3 *tp = netdev_priv(dev);
6551 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6557 spin_lock_irq(&tp->lock);
6559 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6561 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6562 spin_unlock_irq(&tp->lock);
/* ethtool set_tx_csum hook: toggle NETIF_F_IP_CSUM in dev->features,
 * rejecting the request on chips with broken checksum hardware.
 */
6567 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6569 struct tg3 *tp = netdev_priv(dev);
6571 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6578 dev->features |= NETIF_F_IP_CSUM;
6580 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool get_stats_count hook: number of u64 stats we export. */
6585 static int tg3_get_stats_count (struct net_device *dev)
6587 return TG3_NUM_STATS;
/* ethtool get_strings hook: copy out the statistics key names for the
 * requested string set; any other set is a caller bug.
 */
6590 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6592 switch (stringset) {
/* Fixed: "&eth" of "&ethtool_stats_keys" had been mangled into the
 * HTML entity U+00F0 (eth), leaving an undefined identifier.
 */
6594 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6597 WARN_ON(1); /* we need a WARN() */
/* ethtool get_ethtool_stats hook: snapshot the accumulated hardware
 * statistics into the caller-provided u64 array.
 */
6602 static void tg3_get_ethtool_stats (struct net_device *dev,
6603 struct ethtool_stats *estats, u64 *tmp_stats)
/* Use netdev_priv() like every other function in this file rather than
 * reaching into dev->priv directly.
 */
6605 struct tg3 *tp = netdev_priv(dev);
6606 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* Private ioctl handler: implements the standard MII ioctls
 * (get PHY address, read register, write register).  SERDES parts have
 * no PHY, so register access is refused for them; writes additionally
 * require CAP_NET_ADMIN.
 */
6609 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6611 struct mii_ioctl_data *data = if_mii(ifr);
6612 struct tg3 *tp = netdev_priv(dev);
6617 data->phy_id = PHY_ADDR;
6623 if (tp->phy_id == PHY_ID_SERDES)
6624 break; /* We have no PHY */
6626 spin_lock_irq(&tp->lock);
6627 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6628 spin_unlock_irq(&tp->lock);
6630 data->val_out = mii_regval;
6636 if (tp->phy_id == PHY_ID_SERDES)
6637 break; /* We have no PHY */
6639 if (!capable(CAP_NET_ADMIN))
6642 spin_lock_irq(&tp->lock);
6643 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6644 spin_unlock_irq(&tp->lock);
6655 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * chip's RX mode so VLAN tags are kept/stripped as appropriate.
 */
6656 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6658 struct tg3 *tp = netdev_priv(dev);
6660 spin_lock_irq(&tp->lock);
6661 spin_lock(&tp->tx_lock);
6665 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6666 __tg3_set_rx_mode(dev);
6668 spin_unlock(&tp->tx_lock);
6669 spin_unlock_irq(&tp->lock);
/* VLAN acceleration hook: drop our reference to the net_device for a
 * VLAN id that has been removed.
 */
6672 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6674 struct tg3 *tp = netdev_priv(dev);
6676 spin_lock_irq(&tp->lock);
6677 spin_lock(&tp->tx_lock);
6679 tp->vlgrp->vlan_devices[vid] = NULL;
6680 spin_unlock(&tp->tx_lock);
6681 spin_unlock_irq(&tp->lock);
/* ethtool operations table wiring the hooks above (plus a few generic
 * ethtool_op_* helpers) into the net_device.
 */
6685 static struct ethtool_ops tg3_ethtool_ops = {
6686 .get_settings = tg3_get_settings,
6687 .set_settings = tg3_set_settings,
6688 .get_drvinfo = tg3_get_drvinfo,
6689 .get_regs_len = tg3_get_regs_len,
6690 .get_regs = tg3_get_regs,
6691 .get_wol = tg3_get_wol,
6692 .set_wol = tg3_set_wol,
6693 .get_msglevel = tg3_get_msglevel,
6694 .set_msglevel = tg3_set_msglevel,
6695 .nway_reset = tg3_nway_reset,
6696 .get_link = ethtool_op_get_link,
6697 .get_eeprom_len = tg3_get_eeprom_len,
6698 .get_eeprom = tg3_get_eeprom,
6699 .get_ringparam = tg3_get_ringparam,
6700 .set_ringparam = tg3_set_ringparam,
6701 .get_pauseparam = tg3_get_pauseparam,
6702 .set_pauseparam = tg3_set_pauseparam,
6703 .get_rx_csum = tg3_get_rx_csum,
6704 .set_rx_csum = tg3_set_rx_csum,
6705 .get_tx_csum = ethtool_op_get_tx_csum,
6706 .set_tx_csum = tg3_set_tx_csum,
6707 .get_sg = ethtool_op_get_sg,
6708 .set_sg = ethtool_op_set_sg,
6709 #if TG3_TSO_SUPPORT != 0
6710 .get_tso = ethtool_op_get_tso,
6711 .set_tso = tg3_set_tso,
6713 .get_strings = tg3_get_strings,
6714 .get_stats_count = tg3_get_stats_count,
6715 .get_ethtool_stats = tg3_get_ethtool_stats,
6718 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe-time NVRAM setup: reset the EEPROM state machine, enable
 * seeprom access, then detect whether a real NVRAM (flash) interface is
 * present and set TG3_FLAG_NVRAM / TG3_FLAG_NVRAM_BUFFERED accordingly.
 * Sun 5704 boards are skipped entirely (no usable NVRAM).
 */
6719 static void __devinit tg3_nvram_init(struct tg3 *tp)
6723 if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6726 tw32_f(GRC_EEPROM_ADDR,
6727 (EEPROM_ADDR_FSM_RESET |
6728 (EEPROM_DEFAULT_CLOCK_PERIOD <<
6729 EEPROM_ADDR_CLKPERD_SHIFT)));
6731 /* XXX schedule_timeout() ... */
6732 for (j = 0; j < 100; j++)
6735 /* Enable seeprom accesses. */
6736 tw32_f(GRC_LOCAL_CTRL,
6737 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM)
6740 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6741 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
/* 5750 needs NVRAM access explicitly enabled around the probe. */
6744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6745 u32 nvaccess = tr32(NVRAM_ACCESS);
6747 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6750 nvcfg1 = tr32(NVRAM_CFG1);
6752 tp->tg3_flags |= TG3_FLAG_NVRAM;
6753 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6754 if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6755 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6758 tw32(NVRAM_CFG1, nvcfg1);
6761 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6762 u32 nvaccess = tr32(NVRAM_ACCESS);
6764 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6767 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
/* Read one 32-bit word from the legacy serial EEPROM via the
 * GRC_EEPROM_ADDR/DATA registers, polling for completion.  Used on
 * chips without an NVRAM interface.
 */
6771 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6772 u32 offset, u32 *val)
6777 if (offset > EEPROM_ADDR_ADDR_MASK ||
6781 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6782 EEPROM_ADDR_DEVID_MASK |
6784 tw32(GRC_EEPROM_ADDR,
6786 (0 << EEPROM_ADDR_DEVID_SHIFT) |
6787 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6788 EEPROM_ADDR_ADDR_MASK) |
6789 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Busy-wait for the EEPROM state machine to signal completion. */
6791 for (i = 0; i < 10000; i++) {
6792 tmp = tr32(GRC_EEPROM_ADDR);
6794 if (tmp & EEPROM_ADDR_COMPLETE)
6798 if (!(tmp & EEPROM_ADDR_COMPLETE))
6801 *val = tr32(GRC_EEPROM_DATA);
/* Read one 32-bit word from NVRAM at @offset into @val.  Falls back to
 * the EEPROM path when no NVRAM interface was detected; refuses to run
 * on Sun 5704 boards.  Buffered flash parts need the linear offset
 * translated into a page/offset address first.
 */
6805 static int __devinit tg3_nvram_read(struct tg3 *tp,
6806 u32 offset, u32 *val)
6810 if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6811 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6815 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6816 return tg3_nvram_read_using_eeprom(tp, offset, val);
6818 if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6819 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6820 NVRAM_BUFFERED_PAGE_POS) +
6821 (offset % NVRAM_BUFFERED_PAGE_SIZE);
6823 if (offset > NVRAM_ADDR_MSK)
/* 5750 needs NVRAM access enabled around the transfer. */
6828 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6829 u32 nvaccess = tr32(NVRAM_ACCESS);
6831 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6834 tw32(NVRAM_ADDR, offset);
6836 NVRAM_CMD_RD | NVRAM_CMD_GO |
6837 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6839 /* Wait for done bit to clear. */
6840 for (i = 0; i < 1000; i++) {
6842 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
/* Data register is byte-swapped relative to host order. */
6844 *val = swab32(tr32(NVRAM_RDDATA));
6849 tg3_nvram_unlock(tp);
6851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6852 u32 nvaccess = tr32(NVRAM_ACCESS);
6854 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Maps a PCI subsystem vendor/device ID pair to the PHY ID used on that
 * board (see subsys_id_to_phy_id[] below).
 */
6863 struct subsys_tbl_ent {
6864 u16 subsys_vendor, subsys_devid;
/* Known board subsystem IDs and the PHY each one carries; consulted by
 * tg3_phy_probe() before trusting the PHY ID registers themselves.
 */
6868 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6869 /* Broadcom boards. */
6870 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6871 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6872 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6873 { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES }, /* BCM95700A9 */
6874 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6875 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6876 { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES }, /* BCM95701A7 */
6877 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6878 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6879 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6880 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6883 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6884 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6885 { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES }, /* 3C996SX */
6886 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6887 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6890 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6891 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6892 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6893 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6895 /* Compaq boards. */
6896 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6897 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6898 { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES }, /* CHANGELING */
6899 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6900 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6903 { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
/* Determine the PHY type for this board and initialize it.
 *
 * Resolution order: (1) the subsystem-ID table above, (2) the NIC SRAM
 * config area written by firmware (also yields LED mode, ASF enable and
 * WOL capability), (3) the PHY's own ID registers -- unless ASF is
 * active, in which case reading the PHY could race firmware accesses
 * and the hardware read is skipped.  Finally the copper PHY is reset
 * and autonegotiation advertisement is programmed.
 */
6906 static int __devinit tg3_phy_probe(struct tg3 *tp)
6908 u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6909 u32 hw_phy_id, hw_phy_id_masked;
6911 int i, eeprom_signature_found, err;
6913 tp->phy_id = PHY_ID_INVALID;
/* First consult the static subsystem-ID -> PHY table. */
6914 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6915 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6916 tp->pdev->subsystem_vendor) &&
6917 (subsys_id_to_phy_id[i].subsys_devid ==
6918 tp->pdev->subsystem_device)) {
6919 tp->phy_id = subsys_id_to_phy_id[i].phy_id;
/* Next look at the firmware-written NIC SRAM configuration. */
6924 eeprom_phy_id = PHY_ID_INVALID;
6925 eeprom_signature_found = 0;
6926 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6927 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6928 u32 nic_cfg, led_cfg;
6930 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6931 tp->nic_sram_data_cfg = nic_cfg;
6933 eeprom_signature_found = 1;
6935 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
6936 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
6937 eeprom_phy_id = PHY_ID_SERDES;
6941 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
6942 if (nic_phy_id != 0) {
6943 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
6944 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
6946 eeprom_phy_id = (id1 >> 16) << 10;
6947 eeprom_phy_id |= (id2 & 0xfc00) << 16;
6948 eeprom_phy_id |= (id2 & 0x03ff) << 0;
/* LED control mode: 5750 keeps extended bits in a second config word. */
6952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6953 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
6954 led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
6955 SHASTA_EXT_LED_MODE_MASK);
6957 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
6961 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
6962 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
6965 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
6966 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
6969 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
6970 tp->led_ctrl = LED_CTRL_MODE_MAC;
6973 case SHASTA_EXT_LED_SHARED:
6974 tp->led_ctrl = LED_CTRL_MODE_SHARED;
6975 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6976 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
6977 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
6978 LED_CTRL_MODE_PHY_2);
6981 case SHASTA_EXT_LED_MAC:
6982 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
6985 case SHASTA_EXT_LED_COMBO:
6986 tp->led_ctrl = LED_CTRL_MODE_COMBO;
6987 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
6988 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
6989 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards want the PHY_2 LED mode regardless. */
6994 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
6996 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
6997 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
6999 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7000 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7001 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7002 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7003 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7005 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7006 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7008 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7010 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7011 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7014 /* Reading the PHY ID register can conflict with ASF
7015 * firmware access to the PHY hardware.
7018 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7019 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7021 /* Now read the physical PHY_ID from the chip and verify
7022 * that it is sane. If it doesn't look good, we fall back
7023 * to either the hard-coded table based PHY_ID and failing
7024 * that the value found in the eeprom area.
7026 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7027 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7029 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
7030 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7031 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
7033 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7036 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7037 tp->phy_id = hw_phy_id;
7039 /* phy_id currently holds the value found in the
7040 * subsys_id_to_phy_id[] table or PHY_ID_INVALID
7041 * if a match was not found there.
7043 if (tp->phy_id == PHY_ID_INVALID) {
7044 if (!eeprom_signature_found ||
7045 !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
7047 tp->phy_id = eeprom_phy_id;
/* Copper PHY, no ASF: reset it and program autoneg advertisement. */
7051 if (tp->phy_id != PHY_ID_SERDES &&
7052 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7053 u32 bmsr, adv_reg, tg3_ctrl;
7055 tg3_readphy(tp, MII_BMSR, &bmsr);
7056 tg3_readphy(tp, MII_BMSR, &bmsr);
7058 if (bmsr & BMSR_LSTATUS)
7059 goto skip_phy_reset;
7061 err = tg3_phy_reset(tp);
7065 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7066 ADVERTISE_100HALF | ADVERTISE_100FULL |
7067 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7069 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7070 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7071 MII_TG3_CTRL_ADV_1000_FULL);
7072 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7073 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7074 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7075 MII_TG3_CTRL_ENABLE_AS_MASTER);
7078 if (!tg3_copper_is_advertising_all(tp)) {
7079 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7081 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7082 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7084 tg3_writephy(tp, MII_BMCR,
7085 BMCR_ANENABLE | BMCR_ANRESTART);
7087 tg3_phy_set_wirespeed(tp);
7089 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7090 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7091 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7095 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7096 err = tg3_init_5401phy_dsp(tp);
7101 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7102 err = tg3_init_5401phy_dsp(tp);
7105 if (!eeprom_signature_found)
7106 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7108 if (tp->phy_id == PHY_ID_SERDES)
7109 tp->link_config.advertising =
7110 (ADVERTISED_1000baseT_Half |
7111 ADVERTISED_1000baseT_Full |
7112 ADVERTISED_Autoneg |
7114 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7115 tp->link_config.advertising &=
7116 ~(ADVERTISED_1000baseT_Half |
7117 ADVERTISED_1000baseT_Full);
/* Read the board part number out of the VPD area in NVRAM (256 bytes at
 * offset 0x100) and parse the VPD structures for the "PN" keyword.
 * Sun 5704 boards get a hard-coded string; on any failure the part
 * number falls back to "none".
 */
7124 static void __devinit tg3_read_partno(struct tg3 *tp)
7126 unsigned char vpd_data[256];
7127 if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
7128 /* Sun decided not to put the necessary bits in the
7129 * NVRAM of their onboard tg3 parts :(
7131 strcpy(tp->board_part_number, "Sun 5704");
/* Pull the 256-byte VPD region out of NVRAM, one word at a time. */
7135 for (i = 0; i < 256; i += 4) {
7138 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7141 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
7142 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
7143 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7144 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7147 /* Now parse and find the part number. */
7148 for (i = 0; i < 256; ) {
7149 unsigned char val = vpd_data[i];
/* 0x82 = identifier-string tag, 0x91 = read-only VPD tag. */
7152 if (val == 0x82 || val == 0x91) {
7155 (vpd_data[i + 2] << 8)));
7162 block_end = (i + 3 +
7164 (vpd_data[i + 2] << 8)));
7166 while (i < block_end) {
7167 if (vpd_data[i + 0] == 'P' &&
7168 vpd_data[i + 1] == 'N') {
7169 int partno_len = vpd_data[i + 2];
7171 if (partno_len > 24)
7174 memcpy(tp->board_part_number,
7183 /* Part number not found. */
7188 strcpy(tp->board_part_number, "none");
7191 #ifdef CONFIG_SPARC64
/* Return nonzero when this device is a Sun onboard 5704, detected via
 * the OpenPROM subsystem-vendor-id / subsystem-id properties.
 */
7192 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
7194 struct pci_dev *pdev = tp->pdev;
7195 struct pcidev_cookie *pcp = pdev->sysdata;
7198 int node = pcp->prom_node;
7202 err = prom_getproperty(node, "subsystem-vendor-id",
7203 (char *) &venid, sizeof(venid));
7204 if (err == 0 || err == -1)
7206 err = prom_getproperty(node, "subsystem-id",
7207 (char *) &devid, sizeof(devid));
7208 if (err == 0 || err == -1)
7211 if (venid == PCI_VENDOR_ID_SUN &&
7212 devid == PCI_DEVICE_ID_TIGON3_5704)
/* One-time probe: read chip revision and PCI configuration, apply all
 * chip- and host-bridge-specific workaround flags, bring the chip to
 * D0, detect PCI-X/PCI-Express mode, probe the PHY and read the board
 * part number.  Nearly every quirk flag in tg3_flags/tg3_flags2 is
 * decided here.
 */
7219 static int __devinit tg3_get_invariants(struct tg3 *tp)
7222 u32 cacheline_sz_reg;
7223 u32 pci_state_reg, grc_misc_cfg;
7228 #ifdef CONFIG_SPARC64
7229 if (tg3_is_sun_5704(tp))
7230 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
7233 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7234 * reordering to the mailbox registers done by the host
7235 * controller can cause major troubles. We read back from
7236 * every mailbox register write to force the writes to be
7237 * posted to the chip in order.
7239 if (pci_find_device(PCI_VENDOR_ID_INTEL,
7240 PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7241 pci_find_device(PCI_VENDOR_ID_INTEL,
7242 PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7243 pci_find_device(PCI_VENDOR_ID_INTEL,
7244 PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7245 pci_find_device(PCI_VENDOR_ID_INTEL,
7246 PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7247 pci_find_device(PCI_VENDOR_ID_AMD,
7248 PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7249 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7251 /* Force memory write invalidate off. If we leave it on,
7252 * then on 5700_BX chips we have to enable a workaround.
7253 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7254 * to match the cacheline size. The Broadcom driver have this
7255 * workaround but turns MWI off all the time so never uses
7256 * it. This seems to suggest that the workaround is insufficient.
7258 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7259 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7260 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7262 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7263 * has the register indirect write enable bit set before
7264 * we try to access any of the MMIO registers. It is also
7265 * critical that the PCI-X hw workaround situation is decided
7266 * before that as well.
7268 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7271 tp->pci_chip_rev_id = (misc_ctrl_reg >>
7272 MISC_HOST_CTRL_CHIPREV_SHIFT);
7274 /* Initialize misc host control in PCI block. */
7275 tp->misc_host_ctrl |= (misc_ctrl_reg &
7276 MISC_HOST_CTRL_CHIPREV);
7277 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7278 tp->misc_host_ctrl);
7280 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7283 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
7284 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
7285 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
7286 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
7288 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7289 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
/* 5703 wants a minimum PCI latency timer of 64. */
7291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7292 tp->pci_lat_timer < 64) {
7293 tp->pci_lat_timer = 64;
7295 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
7296 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
7297 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
7298 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
7300 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7304 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7307 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7308 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7310 /* If this is a 5700 BX chipset, and we are in PCI-X
7311 * mode, enable register write workaround.
7313 * The workaround is to use indirect register accesses
7314 * for all chip writes not to mailbox registers.
7316 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7320 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7322 /* The chip can have its power management PCI config
7323 * space registers clobbered due to this bug.
7324 * So explicitly force the chip into D0 here.
7326 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7328 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7329 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7330 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7333 /* Also, force SERR#/PERR# in PCI command. */
7334 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7335 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7336 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7340 /* Back to back register writes can cause problems on this chip,
7341 * the workaround is to read back all reg writes except those to
7342 * mailbox regs. See tg3_write_indirect_reg32().
7344 * PCI Express 5750_A0 rev chips need this workaround too.
7346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7347 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7348 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7349 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7351 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7352 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7353 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7354 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7356 /* Chip-specific fixup from Broadcom driver */
7357 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7358 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7359 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7360 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7363 /* Force the chip into D0. */
7364 err = tg3_set_power_state(tp, 0);
7366 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7367 pci_name(tp->pdev));
7371 /* 5700 B0 chips do not support checksumming correctly due
7374 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7375 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7377 /* Pseudo-header checksum is done by hardware logic and not
7378 * the offload processers, so make the chip do the pseudo-
7379 * header checksums on receive. For transmit it is more
7380 * convenient to do the pseudo-header checksum in software
7381 * as Linux does that on transmit for us in all cases.
7383 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7384 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7386 /* Derive initial jumbo mode from MTU assigned in
7387 * ether_setup() via the alloc_etherdev() call
7389 if (tp->dev->mtu > ETH_DATA_LEN)
7390 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7392 /* Determine WakeOnLan speed to use. */
7393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7394 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7395 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7396 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7397 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7399 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7402 /* A few boards don't want Ethernet@WireSpeed phy feature */
7403 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7404 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7405 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7406 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7407 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7409 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7410 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7411 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7412 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7413 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7417 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7419 /* Only 5701 and later support tagged irq status mode.
7420 * Also, 5788 chips cannot use tagged irq status.
7422 * However, since we are using NAPI avoid tagged irq status
7423 * because the interrupt condition is more difficult to
7424 * fully clear in that mode.
7426 tp->coalesce_mode = 0;
7428 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7429 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7430 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7432 /* Initialize MAC MI mode, polling disabled. */
7433 tw32_f(MAC_MI_MODE, tp->mi_mode);
7436 /* Initialize data/descriptor byte/word swapping. */
7437 val = tr32(GRC_MODE);
7438 val &= GRC_MODE_HOST_STACKUP;
7439 tw32(GRC_MODE, val | tp->grc_mode);
7441 tg3_switch_clocks(tp);
7443 /* Clear this out for sanity. */
7444 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7446 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7448 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7449 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7450 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7452 if (chiprevid == CHIPREV_ID_5701_A0 ||
7453 chiprevid == CHIPREV_ID_5701_B0 ||
7454 chiprevid == CHIPREV_ID_5701_B2 ||
7455 chiprevid == CHIPREV_ID_5701_B5) {
7456 unsigned long sram_base;
7458 /* Write some dummy words into the SRAM status block
7459 * area, see if it reads back correctly. If the return
7460 * value is bad, force enable the PCIX workaround.
7462 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7464 writel(0x00000000, sram_base);
7465 writel(0x00000000, sram_base + 4);
7466 writel(0xffffffff, sram_base + 4);
7467 if (readl(sram_base) != 0x00000000)
7468 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7475 /* Always use host TXDs, it performs better in particular
7476 * with multi-frag packets. The tests below are kept here
7477 * as documentation should we change this decision again
7480 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7483 /* Determine if TX descriptors will reside in
7484 * main memory or in the chip SRAM.
7486 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
7487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7489 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7492 grc_misc_cfg = tr32(GRC_MISC_CFG);
7493 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7496 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7497 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7498 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7502 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7503 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7504 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7506 /* these are limited to 10/100 only */
7507 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7508 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7509 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7510 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7511 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7512 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7513 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7514 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7515 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
7516 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7518 err = tg3_phy_probe(tp);
7520 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7521 pci_name(tp->pdev), err);
7522 /* ... but do not return immediately ... */
7525 tg3_read_partno(tp);
7527 if (tp->phy_id == PHY_ID_SERDES) {
7528 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7531 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7533 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7536 /* 5700 {AX,BX} chips have a broken status block link
7537 * change bit implementation, so we must use the
7538 * status register in those cases.
7540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7541 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7543 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7545 /* The led_ctrl is set during tg3_phy_probe, here we might
7546 * have to force the link status polling mechanism based
7547 * upon subsystem IDs.
7549 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7550 tp->phy_id != PHY_ID_SERDES) {
7551 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7552 TG3_FLAG_USE_LINKCHG_REG);
7555 /* For all SERDES we poll the MAC status register. */
7556 if (tp->phy_id == PHY_ID_SERDES)
7557 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7559 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7561 /* 5700 BX chips need to have their TX producer index mailboxes
7562 * written twice to workaround a bug.
7564 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7565 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7567 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7569 /* It seems all chips can get confused if TX buffers
7570 * straddle the 4GB address boundary in some cases.
7572 tp->dev->hard_start_xmit = tg3_start_xmit;
7575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7576 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7579 /* By default, disable wake-on-lan. User can change this
7580 * using ETHTOOL_SWOL.
7582 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7587 #ifdef CONFIG_SPARC64
/* SPARC: fetch the MAC address from the OpenPROM "local-mac-address"
 * property for this PCI node, if present.
 */
7588 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7590 struct net_device *dev = tp->dev;
7591 struct pci_dev *pdev = tp->pdev;
7592 struct pcidev_cookie *pcp = pdev->sysdata;
7595 int node = pcp->prom_node;
7597 if (prom_getproplen(node, "local-mac-address") == 6) {
7598 prom_getproperty(node, "local-mac-address",
/* SPARC fallback: use the system IDPROM ethernet address. */
7606 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7608 struct net_device *dev = tp->dev;
7610 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the device MAC address, trying in order: the SPARC PROM
 * property, the NIC SRAM MAC address mailbox (tagged "HK" = 0x484b),
 * NVRAM at mac_offset, and finally the live MAC_ADDR_0 registers.
 * Falls back to the SPARC IDPROM address if the result is invalid.
 */
7615 static int __devinit tg3_get_device_address(struct tg3 *tp)
7617 struct net_device *dev = tp->dev;
7618 u32 hi, lo, mac_offset;
7620 #ifdef CONFIG_SPARC64
7621 if (!tg3_get_macaddr_sparc(tp))
7626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
/* Fixed: TG3_FLG2_SUN_5704 lives in tg3_flags2, not tg3_flags (see the
 * checks at lines 6723/6810/7127); testing the wrong word compared an
 * unrelated bit.
 */
7627 !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
7628 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7630 if (tg3_nvram_lock(tp))
7631 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7633 tg3_nvram_unlock(tp);
7636 /* First try to get it from MAC address mailbox. */
7637 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
7638 if ((hi >> 16) == 0x484b) {
7639 dev->dev_addr[0] = (hi >> 8) & 0xff;
7640 dev->dev_addr[1] = (hi >> 0) & 0xff;
7642 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7643 dev->dev_addr[2] = (lo >> 24) & 0xff;
7644 dev->dev_addr[3] = (lo >> 16) & 0xff;
7645 dev->dev_addr[4] = (lo >> 8) & 0xff;
7646 dev->dev_addr[5] = (lo >> 0) & 0xff;
7648 /* Next, try NVRAM. */
/* Fixed: same tg3_flags -> tg3_flags2 correction as above. */
7649 else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704) &&
7650 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7651 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7652 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7653 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7654 dev->dev_addr[2] = ((lo >> 0) & 0xff);
7655 dev->dev_addr[3] = ((lo >> 8) & 0xff);
7656 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7657 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7659 /* Finally just fetch it out of the MAC control regs. */
7661 hi = tr32(MAC_ADDR_0_HIGH);
7662 lo = tr32(MAC_ADDR_0_LOW);
7664 dev->dev_addr[5] = lo & 0xff;
7665 dev->dev_addr[4] = (lo >> 8) & 0xff;
7666 dev->dev_addr[3] = (lo >> 16) & 0xff;
7667 dev->dev_addr[2] = (lo >> 24) & 0xff;
7668 dev->dev_addr[1] = hi & 0xff;
7669 dev->dev_addr[0] = (hi >> 8) & 0xff;
7672 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7673 #ifdef CONFIG_SPARC64
7674 if (!tg3_get_default_macaddr_sparc(tp))
/* Run one DMA test transfer of @size bytes between the host buffer at
 * @buf_dma and on-chip SRAM, in the direction given by @to_device.
 * A DMA descriptor is written into NIC SRAM via the PCI memory window,
 * the appropriate DMA engine is kicked through its FTQ, and completion
 * is polled on the corresponding completion FIFO.
 */
7682 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7684 struct tg3_internal_buffer_desc test_desc;
7688 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7690 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7691 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7692 tw32(RDMAC_STATUS, 0);
7693 tw32(WDMAC_STATUS, 0);
7695 tw32(BUFMGR_MODE, 0);
7698 test_desc.addr_hi = ((u64) buf_dma) >> 32;
7699 test_desc.addr_lo = buf_dma & 0xffffffff;
7700 test_desc.nic_mbuf = 0x00002100;
7701 test_desc.len = size;
7704 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
7705 * the *second* time the tg3 driver was getting loaded after an
7708 * Broadcom tells me:
7709 * ...the DMA engine is connected to the GRC block and a DMA
7710 * reset may affect the GRC block in some unpredictable way...
7711 * The behavior of resets to individual blocks has not been tested.
7713 * Broadcom noted the GRC reset will also reset all sub-components.
7716 test_desc.cqid_sqid = (13 << 8) | 2;
7718 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7721 test_desc.cqid_sqid = (16 << 8) | 7;
7723 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7726 test_desc.flags = 0x00000005;
/* Copy the descriptor into NIC SRAM word-by-word through the
 * PCI memory window registers.
 */
7728 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7731 val = *(((u32 *)&test_desc) + i);
7732 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7733 sram_dma_descs + (i * sizeof(u32)));
7734 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7736 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7739 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7741 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll for the descriptor to show up on the completion FIFO. */
7745 for (i = 0; i < 40; i++) {
7749 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7751 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7752 if ((val & 0xffff) == sram_dma_descs) {
/* 1 KiB host buffer used for the probe-time DMA sanity test. */
7763 #define TEST_BUFFER_SIZE 0x400
/*
 * tg3_test_dma() - tune tp->dma_rwctrl for the host bus and verify DMA.
 *
 * First derives DMA read/write command and write-boundary settings from
 * the PCI cache line size and bus type (PCI / PCI-X / PCI Express), then
 * performs a write-and-read-back DMA loop through tg3_do_test_dma() to
 * confirm data integrity, retrying with a 16-byte write boundary
 * workaround if corruption is seen on read-back.
 *
 * NOTE(review): chunk is elided — local declarations, several switch
 * cases, the retry loop structure, and the final return are not visible.
 */
7765 static int __devinit tg3_test_dma(struct tg3 *tp)
/* Coherent buffer: its bus address is handed to the chip's DMA engines. */
7771 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Baseline DMA command watermarks; refined below per bus/chip. */
7777 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7778 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
/* PCI cache line size register is in units of 4-byte dwords. */
7784 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7787 cacheline_size = 1024;
7789 cacheline_size = (int) byte * 4;
/* Pick a DMA write boundary compatible with the host cache line size. */
7791 switch (cacheline_size) {
7796 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7797 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7799 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7801 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7803 ~(DMA_RWCTRL_PCI_WRITE_CMD);
7805 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7810 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7811 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7813 DMA_RWCTRL_WRITE_BNDRY_256;
7814 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7816 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
/* Bus-specific watermark bits (magic values per Broadcom guidance). */
7821 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7822 tp->dma_rwctrl |= 0x001f0000;
7823 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7826 tp->dma_rwctrl |= 0x003f0000;
7828 tp->dma_rwctrl |= 0x003f000f;
/* PCI-X on 5703/5704: certain core clock dividers need ONE_DMA mode. */
7830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7831 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7832 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7834 if (ccval == 0x6 || ccval == 0x7)
7835 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7837 /* Set bit 23 to renable PCIX hw bug fix */
7838 tp->dma_rwctrl |= 0x009f0000;
7840 tp->dma_rwctrl |= 0x001b000f;
7844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7846 tp->dma_rwctrl &= 0xfffffff0;
7848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7850 /* Remove this if it causes problems for some boards. */
7851 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7853 /* On 5700/5701 chips, we need to set this bit.
7854 * Otherwise the chip will issue cacheline transactions
7855 * to streamable DMA memory with not all the byte
7856 * enables turned on. This is an error on several
7857 * RISC PCI controllers, in particular sparc64.
7859 * On 5703/5704 chips, this bit has been reassigned
7860 * a different meaning. In particular, it is used
7861 * on those chips to enable a PCI-X workaround.
7863 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7866 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7869 /* Unneeded, already done by tg3_get_invariants. */
7870 tg3_switch_clocks(tp);
/* The actual transfer test is only run on chips other than 5700/5701. */
7874 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7875 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
/* Fill the buffer with a known pattern before DMAing it to the card. */
7881 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7884 /* Send the buffer to the chip. */
7885 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7887 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7892 /* validate data reached card RAM correctly. */
7893 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7895 tg3_read_mem(tp, 0x2100 + (i*4), &val);
/* NOTE(review): message prints the raw (pre-byteswap) val against the
 * index, while the comparison uses le32_to_cpu(val) vs p[i] —
 * diagnostic output may look inconsistent with the check. */
7896 if (le32_to_cpu(val) != p[i]) {
7897 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
7898 /* ret = -ENODEV here? */
7903 /* Now read it back. */
7904 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7906 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
7912 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* Corruption on read-back with boundary disabled: retry the whole test
 * with a forced 16-byte DMA write boundary as a workaround. */
7916 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7917 DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7918 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7919 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7922 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop ran to completion without a mismatch => test passed. */
7928 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
7936 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/*
 * tg3_init_link_config() - set default link negotiation state at probe.
 *
 * Advertises all 10/100/1000 half/full modes with autonegotiation,
 * marks current/original speed and duplex as invalid (link not yet
 * established), and starts with the carrier off.
 */
7941 static void __devinit tg3_init_link_config(struct tg3 *tp)
7943 tp->link_config.advertising =
7944 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
7945 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
7946 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
7947 ADVERTISED_Autoneg | ADVERTISED_MII);
7948 tp->link_config.speed = SPEED_INVALID;
7949 tp->link_config.duplex = DUPLEX_INVALID;
7950 tp->link_config.autoneg = AUTONEG_ENABLE;
/* No link yet: tell the stack the carrier is down. */
7951 netif_carrier_off(tp->dev);
7952 tp->link_config.active_speed = SPEED_INVALID;
7953 tp->link_config.active_duplex = DUPLEX_INVALID;
7954 tp->link_config.phy_is_low_power = 0;
/* orig_* hold pre-power-down settings to restore on resume. */
7955 tp->link_config.orig_speed = SPEED_INVALID;
7956 tp->link_config.orig_duplex = DUPLEX_INVALID;
7957 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/*
 * tg3_init_bufmgr_config() - load default buffer-manager watermarks.
 *
 * Fills tp->bufmgr_config with the standard-MTU and jumbo-MTU mbuf
 * watermarks plus the DMA low/high water marks.  5705/5750 parts get
 * different mbuf values patched in later by tg3_init_one().
 */
7960 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
7962 tp->bufmgr_config.mbuf_read_dma_low_water =
7963 DEFAULT_MB_RDMA_LOW_WATER;
7964 tp->bufmgr_config.mbuf_mac_rx_low_water =
7965 DEFAULT_MB_MACRX_LOW_WATER;
7966 tp->bufmgr_config.mbuf_high_water =
7967 DEFAULT_MB_HIGH_WATER;
/* Separate watermark set used when jumbo frames are enabled. */
7969 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
7970 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
7971 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
7972 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
7973 tp->bufmgr_config.mbuf_high_water_jumbo =
7974 DEFAULT_MB_HIGH_WATER_JUMBO;
7976 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
7977 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/*
 * tg3_phy_string() - map the masked PHY id to a human-readable name
 * for the probe-time banner printk.  Returns a static string.
 */
7980 static char * __devinit tg3_phy_string(struct tg3 *tp)
7982 switch (tp->phy_id & PHY_ID_MASK) {
7983 case PHY_ID_BCM5400: return "5400";
7984 case PHY_ID_BCM5401: return "5401";
7985 case PHY_ID_BCM5411: return "5411";
7986 case PHY_ID_BCM5701: return "5701";
7987 case PHY_ID_BCM5703: return "5703";
7988 case PHY_ID_BCM5704: return "5704";
7989 case PHY_ID_BCM5705: return "5705";
7990 case PHY_ID_BCM5750: return "5750";
7991 case PHY_ID_BCM8002: return "8002";
7992 case PHY_ID_SERDES: return "serdes";
7993 default: return "unknown";
/*
 * tg3_find_5704_peer() - locate the other PCI function of a dual-port
 * 5704 on the same slot (devfn with the function bits masked off).
 *
 * Scans all 8 functions of our device number and returns the first
 * pci_dev that is not ourselves.  NOTE(review): the elided tail handles
 * the not-found case and drops the pci_get_slot() reference, per the
 * comment about the refcount below.
 */
7997 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
7999 struct pci_dev *peer;
8000 unsigned int func, devnr = tp->pdev->devfn & ~7;
8002 for (func = 0; func < 8; func++) {
8003 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8004 if (peer && peer != tp->pdev)
8008 if (!peer || peer == tp->pdev)
8012 * We don't need to keep the refcount elevated; there's no way
8013 * to remove one half of this device without removing the other
8021 const struct pci_device_id *ent)
8023 static int tg3_version_printed = 0;
8024 unsigned long tg3reg_base, tg3reg_len;
8025 struct net_device *dev;
8027 int i, err, pci_using_dac, pm_cap;
8029 if (tg3_version_printed++ == 0)
8030 printk(KERN_INFO "%s", version);
8032 err = pci_enable_device(pdev);
8034 printk(KERN_ERR PFX "Cannot enable PCI device, "
8039 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8040 printk(KERN_ERR PFX "Cannot find proper PCI device "
8041 "base address, aborting.\n");
8043 goto err_out_disable_pdev;
8046 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8048 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8050 goto err_out_disable_pdev;
8053 pci_set_master(pdev);
8055 /* Find power-management capability. */
8056 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8058 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8061 goto err_out_free_res;
8064 /* Configure DMA attributes. */
8065 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8068 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8070 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8071 "for consistent allocations\n");
8072 goto err_out_free_res;
8075 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8077 printk(KERN_ERR PFX "No usable DMA configuration, "
8079 goto err_out_free_res;
8084 tg3reg_base = pci_resource_start(pdev, 0);
8085 tg3reg_len = pci_resource_len(pdev, 0);
8087 dev = alloc_etherdev(sizeof(*tp));
8089 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8091 goto err_out_free_res;
8094 SET_MODULE_OWNER(dev);
8095 SET_NETDEV_DEV(dev, &pdev->dev);
8098 dev->features |= NETIF_F_HIGHDMA;
8099 #if TG3_VLAN_TAG_USED
8100 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8101 dev->vlan_rx_register = tg3_vlan_rx_register;
8102 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8105 tp = netdev_priv(dev);
8108 tp->pm_cap = pm_cap;
8109 tp->mac_mode = TG3_DEF_MAC_MODE;
8110 tp->rx_mode = TG3_DEF_RX_MODE;
8111 tp->tx_mode = TG3_DEF_TX_MODE;
8112 tp->mi_mode = MAC_MI_MODE_BASE;
8114 tp->msg_enable = tg3_debug;
8116 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8118 /* The word/byte swap controls here control register access byte
8119 * swapping. DMA data byte swapping is controlled in the GRC_MODE
8122 tp->misc_host_ctrl =
8123 MISC_HOST_CTRL_MASK_PCI_INT |
8124 MISC_HOST_CTRL_WORD_SWAP |
8125 MISC_HOST_CTRL_INDIR_ACCESS |
8126 MISC_HOST_CTRL_PCISTATE_RW;
8128 /* The NONFRM (non-frame) byte/word swap controls take effect
8129 * on descriptor entries, anything which isn't packet data.
8131 * The StrongARM chips on the board (one for tx, one for rx)
8132 * are running in big-endian mode.
8134 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8135 GRC_MODE_WSWAP_NONFRM_DATA);
8137 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8139 spin_lock_init(&tp->lock);
8140 spin_lock_init(&tp->tx_lock);
8141 spin_lock_init(&tp->indirect_lock);
8142 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8144 tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
8145 if (tp->regs == 0UL) {
8146 printk(KERN_ERR PFX "Cannot map device registers, "
8149 goto err_out_free_dev;
8152 tg3_init_link_config(tp);
8154 tg3_init_bufmgr_config(tp);
8156 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8157 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8158 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8160 dev->open = tg3_open;
8161 dev->stop = tg3_close;
8162 dev->get_stats = tg3_get_stats;
8163 dev->set_multicast_list = tg3_set_rx_mode;
8164 dev->set_mac_address = tg3_set_mac_addr;
8165 dev->do_ioctl = tg3_ioctl;
8166 dev->tx_timeout = tg3_tx_timeout;
8167 dev->poll = tg3_poll;
8168 dev->ethtool_ops = &tg3_ethtool_ops;
8170 dev->watchdog_timeo = TG3_TX_TIMEOUT;
8171 dev->change_mtu = tg3_change_mtu;
8172 dev->irq = pdev->irq;
8173 #ifdef CONFIG_NET_POLL_CONTROLLER
8174 dev->poll_controller = tg3_poll_controller;
8177 err = tg3_get_invariants(tp);
8179 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8181 goto err_out_iounmap;
8184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8186 tp->bufmgr_config.mbuf_read_dma_low_water =
8187 DEFAULT_MB_RDMA_LOW_WATER_5705;
8188 tp->bufmgr_config.mbuf_mac_rx_low_water =
8189 DEFAULT_MB_MACRX_LOW_WATER_5705;
8190 tp->bufmgr_config.mbuf_high_water =
8191 DEFAULT_MB_HIGH_WATER_5705;
8194 #if TG3_TSO_SUPPORT != 0
8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8197 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8198 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8199 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8200 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8202 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8205 /* TSO is off by default, user can enable using ethtool. */
8207 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8208 dev->features |= NETIF_F_TSO;
8213 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8214 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8215 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8216 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8217 tp->rx_pending = 63;
8220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8221 tp->pdev_peer = tg3_find_5704_peer(tp);
8223 err = tg3_get_device_address(tp);
8225 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8227 goto err_out_iounmap;
8231 * Reset chip in case UNDI or EFI driver did not shutdown
8232 * DMA self test will enable WDMAC and we'll see (spurious)
8233 * pending DMA on the PCI bus at that point.
8235 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8236 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8237 pci_save_state(tp->pdev, tp->pci_cfg_state);
8238 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8242 err = tg3_test_dma(tp);
8244 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8245 goto err_out_iounmap;
8248 /* Tigon3 can do ipv4 only... and some chips have buggy
8251 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8252 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8253 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8255 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8257 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8258 dev->features &= ~NETIF_F_HIGHDMA;
8260 err = register_netdev(dev);
8262 printk(KERN_ERR PFX "Cannot register net device, "
8264 goto err_out_iounmap;
8267 pci_set_drvdata(pdev, dev);
8269 /* Now that we have fully setup the chip, save away a snapshot
8270 * of the PCI config space. We need to restore this after
8271 * GRC_MISC_CFG core clock resets and some resume events.
8273 pci_save_state(tp->pdev, tp->pci_cfg_state);
8275 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8277 tp->board_part_number,
8278 tp->pci_chip_rev_id,
8280 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8281 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8282 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8283 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8284 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8285 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8287 for (i = 0; i < 6; i++)
8288 printk("%2.2x%c", dev->dev_addr[i],
8289 i == 5 ? '\n' : ':');
8291 printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
8292 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8295 (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
8296 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8297 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8298 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8299 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8300 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8301 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8302 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8307 iounmap((void *) tp->regs);
8313 pci_release_regions(pdev);
8315 err_out_disable_pdev:
8316 pci_disable_device(pdev);
8317 pci_set_drvdata(pdev, NULL);
/*
 * tg3_remove_one() - PCI remove callback; tear down one device.
 *
 * Unregisters the netdev, unmaps the register BAR, releases the PCI
 * regions, disables the device, and clears the drvdata pointer —
 * the reverse of tg3_init_one().
 */
8321 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8323 struct net_device *dev = pci_get_drvdata(pdev);
8326 struct tg3 *tp = netdev_priv(dev);
8328 unregister_netdev(dev);
8329 iounmap((void *)tp->regs);
8331 pci_release_regions(pdev);
8332 pci_disable_device(pdev);
8333 pci_set_drvdata(pdev, NULL);
/*
 * tg3_suspend() - PCI suspend callback.
 *
 * If the interface is running: stop the timer, disable interrupts and
 * halt the chip under tp->lock/tx_lock, detach the netdev, then request
 * the target power state.  If tg3_set_power_state() fails, the visible
 * tail re-arms the timer and re-attaches/restarts the interface.
 *
 * NOTE(review): chunk is elided — the chip-halt call and the return
 * statements are not visible here.
 */
8337 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8339 struct net_device *dev = pci_get_drvdata(pdev);
8340 struct tg3 *tp = netdev_priv(dev);
/* Nothing to do if the interface was never brought up. */
8343 if (!netif_running(dev))
8348 del_timer_sync(&tp->timer);
/* Lock order throughout the driver: tp->lock (irq) then tp->tx_lock. */
8350 spin_lock_irq(&tp->lock);
8351 spin_lock(&tp->tx_lock);
8352 tg3_disable_ints(tp);
8353 spin_unlock(&tp->tx_lock);
8354 spin_unlock_irq(&tp->lock);
8356 netif_device_detach(dev);
8358 spin_lock_irq(&tp->lock);
8359 spin_lock(&tp->tx_lock);
8361 spin_unlock(&tp->tx_lock);
8362 spin_unlock_irq(&tp->lock);
8364 err = tg3_set_power_state(tp, state);
/* Power transition failed: undo the shutdown and keep the NIC running. */
8366 spin_lock_irq(&tp->lock);
8367 spin_lock(&tp->tx_lock);
8371 tp->timer.expires = jiffies + tp->timer_offset;
8372 add_timer(&tp->timer);
8374 spin_unlock(&tp->tx_lock);
8375 spin_unlock_irq(&tp->lock);
8377 netif_device_attach(dev);
8378 tg3_netif_start(tp);
/*
 * tg3_resume() - PCI resume callback; reverse of tg3_suspend().
 *
 * Restores PCI config space, returns the chip to full power, re-attaches
 * the netdev, reinitializes the chip under the locks (elided), re-arms
 * the poll timer, re-enables interrupts, and restarts the queue.
 *
 * NOTE(review): chunk is elided — the chip re-init call and the return
 * statements are not visible here.
 */
8384 static int tg3_resume(struct pci_dev *pdev)
8386 struct net_device *dev = pci_get_drvdata(pdev);
8387 struct tg3 *tp = netdev_priv(dev);
8390 if (!netif_running(dev))
/* Restore the config-space snapshot saved at probe/suspend time. */
8393 pci_restore_state(tp->pdev, tp->pci_cfg_state);
8395 err = tg3_set_power_state(tp, 0);
8399 netif_device_attach(dev);
8401 spin_lock_irq(&tp->lock);
8402 spin_lock(&tp->tx_lock);
8406 tp->timer.expires = jiffies + tp->timer_offset;
8407 add_timer(&tp->timer);
8409 tg3_enable_ints(tp);
8411 spin_unlock(&tp->tx_lock);
8412 spin_unlock_irq(&tp->lock);
8414 tg3_netif_start(tp);
/* PCI driver descriptor: binds tg3_pci_tbl device IDs to the probe,
 * remove, and power-management callbacks defined above. */
8419 static struct pci_driver tg3_driver = {
8420 .name = DRV_MODULE_NAME,
8421 .id_table = tg3_pci_tbl,
8422 .probe = tg3_init_one,
/* __devexit_p() compiles to NULL when hotplug removal is configured out. */
8423 .remove = __devexit_p(tg3_remove_one),
8424 .suspend = tg3_suspend,
8425 .resume = tg3_resume
/* Module init: register the PCI driver (pci_module_init is the 2.6-era
 * wrapper around pci_register_driver). */
8428 static int __init tg3_init(void)
8430 return pci_module_init(&tg3_driver);
/* Module exit: unregister the PCI driver, triggering tg3_remove_one()
 * for every bound device. */
8433 static void __exit tg3_cleanup(void)
8435 pci_unregister_driver(&tg3_driver);
/* Register the module entry/exit points with the kernel module loader. */
8438 module_init(tg3_init);
8439 module_exit(tg3_cleanup);