2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
9 #include <linux/config.h>
11 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/compiler.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/pci.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/mii.h>
26 #include <linux/if_vlan.h>
28 #include <linux/tcp.h>
29 #include <linux/workqueue.h>
31 #include <net/checksum.h>
33 #include <asm/system.h>
35 #include <asm/byteorder.h>
36 #include <asm/uaccess.h>
39 #include <asm/idprom.h>
40 #include <asm/oplib.h>
44 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
45 #define TG3_VLAN_TAG_USED 1
47 #define TG3_VLAN_TAG_USED 0
51 #define TG3_TSO_SUPPORT 1
53 #define TG3_TSO_SUPPORT 0
58 #define DRV_MODULE_NAME "tg3"
59 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "3.8"
61 #define DRV_MODULE_RELDATE "July 14, 2004"
63 #define TG3_DEF_MAC_MODE 0
64 #define TG3_DEF_RX_MODE 0
65 #define TG3_DEF_TX_MODE 0
66 #define TG3_DEF_MSG_ENABLE \
76 /* length of time before we decide the hardware is borked,
77 * and dev->tx_timeout() should be called to fix the problem
79 #define TG3_TX_TIMEOUT (5 * HZ)
81 /* hardware minimum and maximum for a single frame's data payload */
82 #define TG3_MIN_MTU 60
83 #define TG3_MAX_MTU(tp) \
84 ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
85 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
87 /* These numbers seem to be hard coded in the NIC firmware somehow.
88 * You can't change the ring sizes, but you can change where you place
89 * them in the NIC onboard memory.
91 #define TG3_RX_RING_SIZE 512
92 #define TG3_DEF_RX_RING_PENDING 200
93 #define TG3_RX_JUMBO_RING_SIZE 256
94 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
96 /* Do not place this n-ring entries value into the tp struct itself,
97 * we really want to expose these constants to GCC so that modulo et
98 * al. operations are done with shifts and masks instead of with
99 * hw multiply/modulo instructions. Another solution would be to
100 * replace things like '% foo' with '& (foo - 1)'.
102 #define TG3_RX_RCB_RING_SIZE(tp) \
103 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
107 #define TG3_TX_RING_SIZE 512
108 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
110 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
112 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
113 TG3_RX_JUMBO_RING_SIZE)
114 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RCB_RING_SIZE(tp))
116 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
118 #define TX_RING_GAP(TP) \
119 (TG3_TX_RING_SIZE - (TP)->tx_pending)
120 #define TX_BUFFS_AVAIL(TP) \
121 (((TP)->tx_cons <= (TP)->tx_prod) ? \
122 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
123 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
124 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135 static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_PARM(tg3_debug, "i");
142 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
144 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 static struct pci_device_id tg3_pci_tbl[] = {
147 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
148 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
149 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225 char string[ETH_GSTRING_LEN];
226 } ethtool_stats_keys[TG3_NUM_STATS] = {
229 { "rx_ucast_packets" },
230 { "rx_mcast_packets" },
231 { "rx_bcast_packets" },
233 { "rx_align_errors" },
234 { "rx_xon_pause_rcvd" },
235 { "rx_xoff_pause_rcvd" },
236 { "rx_mac_ctrl_rcvd" },
237 { "rx_xoff_entered" },
238 { "rx_frame_too_long_errors" },
240 { "rx_undersize_packets" },
241 { "rx_in_length_errors" },
242 { "rx_out_length_errors" },
243 { "rx_64_or_less_octet_packets" },
244 { "rx_65_to_127_octet_packets" },
245 { "rx_128_to_255_octet_packets" },
246 { "rx_256_to_511_octet_packets" },
247 { "rx_512_to_1023_octet_packets" },
248 { "rx_1024_to_1522_octet_packets" },
249 { "rx_1523_to_2047_octet_packets" },
250 { "rx_2048_to_4095_octet_packets" },
251 { "rx_4096_to_8191_octet_packets" },
252 { "rx_8192_to_9022_octet_packets" },
259 { "tx_flow_control" },
261 { "tx_single_collisions" },
262 { "tx_mult_collisions" },
264 { "tx_excessive_collisions" },
265 { "tx_late_collisions" },
266 { "tx_collide_2times" },
267 { "tx_collide_3times" },
268 { "tx_collide_4times" },
269 { "tx_collide_5times" },
270 { "tx_collide_6times" },
271 { "tx_collide_7times" },
272 { "tx_collide_8times" },
273 { "tx_collide_9times" },
274 { "tx_collide_10times" },
275 { "tx_collide_11times" },
276 { "tx_collide_12times" },
277 { "tx_collide_13times" },
278 { "tx_collide_14times" },
279 { "tx_collide_15times" },
280 { "tx_ucast_packets" },
281 { "tx_mcast_packets" },
282 { "tx_bcast_packets" },
283 { "tx_carrier_sense_errors" },
287 { "dma_writeq_full" },
288 { "dma_write_prioq_full" },
292 { "rx_threshold_hit" },
294 { "dma_readq_full" },
295 { "dma_read_prioq_full" },
296 { "tx_comp_queue_full" },
298 { "ring_set_send_prod_index" },
299 { "ring_status_update" },
301 { "nic_avoided_irqs" },
302 { "nic_tx_threshold_hit" }
/* Write a 32-bit MAC register at offset 'off'.
 * On chips with the PCI-X target hardware bug, write indirectly through
 * PCI config space (REG_BASE_ADDR/REG_DATA) under indirect_lock; otherwise
 * do a direct MMIO write, reading it back on chips with the 5701
 * register-write bug to flush the posted write.
 * NOTE(review): declarations/braces elided by extraction in this view.
 */
305 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
307 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
310 spin_lock_irqsave(&tp->indirect_lock, flags);
311 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
312 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
313 spin_unlock_irqrestore(&tp->indirect_lock, flags);
315 writel(val, tp->regs + off);
/* Read back to force completion of the posted MMIO write. */
316 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
317 readl(tp->regs + off);
/* Like tg3_write_indirect_reg32(), but the direct-MMIO path always reads
 * the register back so the write is flushed before the caller proceeds
 * (used where register-write ordering against the hardware matters).
 * NOTE(review): declarations/braces elided by extraction in this view.
 */
321 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
323 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
326 spin_lock_irqsave(&tp->indirect_lock, flags);
327 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
328 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
329 spin_unlock_irqrestore(&tp->indirect_lock, flags);
331 unsigned long dest = tp->regs + off;
333 readl(dest); /* always flush PCI write */
/* Write an RX mailbox register.
 * NOTE(review): the body is partially elided here; the MBOX_WRITE_REORDER
 * flag presumably gates a read-back to defeat write reordering on affected
 * host bridges — confirm against the full source.
 */
337 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
339 unsigned long mbox = tp->regs + off;
341 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Write a TX mailbox register, applying the TXD mailbox hardware-bug
 * workaround and the write-reorder workaround when the corresponding
 * tg3_flags are set.
 * NOTE(review): the workaround statements themselves are elided in this view.
 */
345 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
347 unsigned long mbox = tp->regs + off;
349 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
351 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
355 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
356 #define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
357 #define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
359 #define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
360 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
361 #define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
362 #define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
363 #define tr32(reg) readl(tp->regs + (reg))
364 #define tr16(reg) readw(tp->regs + (reg))
365 #define tr8(reg) readb(tp->regs + (reg))
/* Write a word of NIC on-board memory at 'off' via the PCI config-space
 * memory window (MEM_WIN_BASE_ADDR/MEM_WIN_DATA), serialized with
 * indirect_lock.  The window base is restored to zero afterwards.
 */
367 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
371 spin_lock_irqsave(&tp->indirect_lock, flags);
372 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
373 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
375 /* Always leave this as zero. */
376 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
377 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word of NIC on-board memory at 'off' into *val via the PCI
 * config-space memory window, serialized with indirect_lock.  The window
 * base is restored to zero afterwards (counterpart of tg3_write_mem()).
 */
380 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
384 spin_lock_irqsave(&tp->indirect_lock, flags);
385 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
386 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
388 /* Always leave this as zero. */
389 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
390 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Disable chip interrupts: mask the PCI INT in MISC_HOST_CTRL, write 1 to
 * the interrupt mailbox (holds the INTA line deasserted), then read the
 * mailbox back to flush the write.
 */
393 static void tg3_disable_ints(struct tg3 *tp)
395 tw32(TG3PCI_MISC_HOST_CTRL,
396 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
397 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
398 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* If the status block already has an update pending (SD_STATUS_UPDATED),
 * force an interrupt via GRC local control so the event is not lost.
 */
401 static inline void tg3_cond_int(struct tg3 *tp)
403 if (tp->hw_status->status & SD_STATUS_UPDATED)
404 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Re-enable chip interrupts: unmask the PCI INT, write 0 to the interrupt
 * mailbox (releases the INTA line), and read it back to flush.
 * Mirror image of tg3_disable_ints().
 */
407 static void tg3_enable_ints(struct tg3 *tp)
409 tw32(TG3PCI_MISC_HOST_CTRL,
410 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
411 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
412 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* Quiesce the network interface: stop NAPI polling first, then disable
 * the transmit queue.
 */
417 static inline void tg3_netif_stop(struct tg3 *tp)
419 netif_poll_disable(tp->dev);
420 netif_tx_disable(tp->dev);
/* Restart the network interface after tg3_netif_stop(): wake the TX queue
 * and re-enable NAPI polling.
 */
423 static inline void tg3_netif_start(struct tg3 *tp)
425 netif_wake_queue(tp->dev);
426 /* NOTE: unconditional netif_wake_queue is only appropriate
427 * so long as all callers are assured to have free tx slots
428 * (such as after tg3_init_hw)
430 netif_poll_enable(tp->dev);
/* Switch the chip's core clock off the 44MHz/ALT clock back to the normal
 * setting, caching the resulting CLOCK_CTRL value in tp->pci_clock_ctrl.
 * On non-5705/5750 parts currently running the 44MHz core clock, the
 * transition is staged through intermediate ALTCLK writes (flushed with
 * tw32_f) before the final value is programmed.
 * NOTE(review): some intermediate lines are elided by extraction here.
 */
434 static void tg3_switch_clocks(struct tg3 *tp)
436 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
439 orig_clock_ctrl = clock_ctrl;
440 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
441 CLOCK_CTRL_CLKRUN_OENABLE |
443 tp->pci_clock_ctrl = clock_ctrl;
445 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
447 (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
448 tw32_f(TG3PCI_CLOCK_CTRL,
450 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
452 tw32_f(TG3PCI_CLOCK_CTRL,
453 clock_ctrl | (CLOCK_CTRL_ALTCLK));
456 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
460 #define PHY_BUSY_LOOPS 5000
/* Read MII PHY register 'reg' into *val over the MAC's MI (MDIO) interface.
 * Temporarily disables hardware autopolling (MAC_MI_MODE_AUTO_POLL) so the
 * manual transaction is not disturbed, builds the MI_COM frame (PHY addr,
 * reg addr, READ command, START), then busy-polls up to PHY_BUSY_LOOPS for
 * MI_COM_BUSY to clear before latching the 16-bit data field.
 * Autopolling is restored on exit if it was enabled.
 * NOTE(review): variable declarations/returns are elided in this view.
 */
462 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
467 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
469 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
475 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
476 MI_COM_PHY_ADDR_MASK);
477 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
478 MI_COM_REG_ADDR_MASK);
479 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
481 tw32_f(MAC_MI_COM, frame_val);
483 loops = PHY_BUSY_LOOPS;
484 while (loops-- > 0) {
486 frame_val = tr32(MAC_MI_COM);
488 if ((frame_val & MI_COM_BUSY) == 0) {
/* Re-read once more after BUSY clears to get stable data. */
490 frame_val = tr32(MAC_MI_COM);
497 *val = frame_val & MI_COM_DATA_MASK;
501 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
502 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write 'val' to MII PHY register 'reg' over the MAC's MI (MDIO) interface.
 * Same structure as tg3_readphy(): autopolling is suspended, the MI_COM
 * frame carries the data and WRITE command, completion is busy-polled via
 * MI_COM_BUSY, and autopolling is restored afterwards.
 * NOTE(review): variable declarations/returns are elided in this view.
 */
509 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
514 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
516 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
520 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
521 MI_COM_PHY_ADDR_MASK);
522 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
523 MI_COM_REG_ADDR_MASK);
524 frame_val |= (val & MI_COM_DATA_MASK);
525 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
527 tw32_f(MAC_MI_COM, frame_val);
529 loops = PHY_BUSY_LOOPS;
530 while (loops-- > 0) {
532 frame_val = tr32(MAC_MI_COM);
533 if ((frame_val & MI_COM_BUSY) == 0) {
535 frame_val = tr32(MAC_MI_COM);
544 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
545 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable the PHY's "ethernet wirespeed" feature (auto speed downshift)
 * via a read-modify-write of the shadow register selected through
 * MII_TG3_AUX_CTRL 0x7007, unless the chip is flagged as not supporting it.
 */
552 static void tg3_phy_set_wirespeed(struct tg3 *tp)
556 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
559 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
560 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
561 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
/* Issue a PHY soft reset by setting BMCR_RESET, then poll BMCR until the
 * self-clearing reset bit drops (or the elided loop times out).
 */
564 static int tg3_bmcr_reset(struct tg3 *tp)
569 /* OK, reset it, and poll the BMCR_RESET bit until it
570 * clears or we time out.
572 phy_control = BMCR_RESET;
573 err = tg3_writephy(tp, MII_BMCR, phy_control);
579 err = tg3_readphy(tp, MII_BMCR, &phy_control);
583 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until bit 12 (0x1000) clears, indicating the DSP
 * macro operation has completed.
 * NOTE(review): the loop construct and timeout return are elided here.
 */
595 static int tg3_wait_macro_done(struct tg3 *tp)
602 tg3_readphy(tp, 0x16, &tmp32);
603 if ((tmp32 & 0x1000) == 0)
/* Write a DSP test pattern into each of the four PHY channels and read it
 * back for verification.  For each channel: select the channel's DSP
 * address, write the six pattern words, trigger the macro (reg 0x16
 * commands 0x0202 / 0x0082 / 0x0802), then read the words back in
 * low/high pairs and compare against test_pat.  On mismatch, a recovery
 * sequence is written and (per the elided code) *resetp is presumably set
 * to request another PHY reset — confirm against the full source.
 */
612 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
614 static const u32 test_pat[4][6] = {
615 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
616 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
617 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
618 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
622 for (chan = 0; chan < 4; chan++) {
625 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
626 (chan * 0x2000) | 0x0200);
627 tg3_writephy(tp, 0x16, 0x0002);
629 for (i = 0; i < 6; i++)
630 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
633 tg3_writephy(tp, 0x16, 0x0202);
634 if (tg3_wait_macro_done(tp)) {
639 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
640 (chan * 0x2000) | 0x0200);
641 tg3_writephy(tp, 0x16, 0x0082);
642 if (tg3_wait_macro_done(tp)) {
647 tg3_writephy(tp, 0x16, 0x0802);
648 if (tg3_wait_macro_done(tp)) {
/* Read back three low/high word pairs and verify against the pattern. */
653 for (i = 0; i < 6; i += 2) {
656 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
657 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
658 if (tg3_wait_macro_done(tp)) {
664 if (low != test_pat[chan][i] ||
665 high != test_pat[chan][i+1]) {
666 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
668 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Clear the DSP pattern in all four PHY channels: for each channel, select
 * its DSP address, write six zero words, then trigger the macro via
 * reg 0x16 (0x0202) and wait for completion.
 */
678 static int tg3_phy_reset_chanpat(struct tg3 *tp)
682 for (chan = 0; chan < 4; chan++) {
685 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
686 (chan * 0x2000) | 0x0200);
687 tg3_writephy(tp, 0x16, 0x0002);
688 for (i = 0; i < 6; i++)
689 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
690 tg3_writephy(tp, 0x16, 0x0202);
691 if (tg3_wait_macro_done(tp))
/* PHY reset workaround sequence for 5703/5704/5705-class chips: reset the
 * PHY (tg3_bmcr_reset), disable the transmitter, force 1000/full master
 * mode, run the DSP test-pattern write/verify (retrying with additional
 * resets as needed), clear the channel patterns, and finally restore the
 * saved MII_TG3_CTRL and extended-control settings.
 * NOTE(review): several lines ('®32' below is mojibake for '&reg32' —
 * an HTML-entity corruption of the extraction; also retry-loop scaffolding)
 * are damaged or elided in this view and must be restored from the
 * original source before compiling.
 */
698 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
700 u32 reg32, phy9_orig;
701 int retries, do_phy_reset, err;
707 err = tg3_bmcr_reset(tp);
713 /* Disable transmitter and interrupt. */
714 tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
716 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
718 /* Set full-duplex, 1000 mbps. */
719 tg3_writephy(tp, MII_BMCR,
720 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
722 /* Set to master mode. */
723 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
724 tg3_writephy(tp, MII_TG3_CTRL,
725 (MII_TG3_CTRL_AS_MASTER |
726 MII_TG3_CTRL_ENABLE_AS_MASTER));
728 /* Enable SM_DSP_CLOCK and 6dB. */
729 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
731 /* Block the PHY control access. */
732 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
733 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
735 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
740 err = tg3_phy_reset_chanpat(tp);
744 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
745 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
747 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
748 tg3_writephy(tp, 0x16, 0x0000);
750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
752 /* Set Extended packet length bit for jumbo frames */
753 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
756 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* Restore the saved 1000BASE-T control register. */
759 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
761 tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
763 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
768 /* This will reset the tigon3 PHY if there is no valid
769 * link unless the FORCE argument is non-zero.
/* Reset the PHY.  Reads BMSR twice (latched-status behavior), dispatches
 * to the 5703/4/5 workaround sequence where required, otherwise performs
 * a plain BMCR reset, then applies per-chip PHY errata fixups
 * (ADC bug, 5704 A0 bug, BER bug), sets the extended packet length bit on
 * jumbo-capable chips, and finally enables ethernet wirespeed.
 * NOTE(review): error-return lines between steps are elided in this view.
 */
771 static int tg3_phy_reset(struct tg3 *tp)
/* BMSR is latched-low; read twice to get the current link state. */
776 err = tg3_readphy(tp, MII_BMSR, &phy_status);
777 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
784 err = tg3_phy_reset_5703_4_5(tp);
790 err = tg3_bmcr_reset(tp);
/* Errata: ADC bug fixup via DSP register writes. */
795 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
796 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
797 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
798 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
799 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
800 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
801 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* Errata: 5704 A0 fixup (intentionally written twice). */
803 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
804 tg3_writephy(tp, 0x1c, 0x8d68);
805 tg3_writephy(tp, 0x1c, 0x8d68);
/* Errata: bit-error-rate bug fixup via DSP register writes. */
807 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
808 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
810 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
811 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
812 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
813 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
814 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
815 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
817 /* Set Extended packet length bit (bit 14) on all chips that */
818 /* support jumbo frames */
819 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
820 /* Cannot do read-modify-write on 5401 */
821 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
822 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
823 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
826 /* Set bit 14 with read-modify-write to preserve other bits */
827 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
828 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
829 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
831 tg3_phy_set_wirespeed(tp);
/* Configure the GPIO-driven auxiliary power pins, coordinating with the
 * peer function on dual-port 5704 boards (tp_peer via pdev_peer).
 * If WOL is enabled on either port, drive the GPIO combination required
 * to keep aux power up for this chip generation (5700/5701 use a
 * different OE/OUTPUT mix than later chips); otherwise release the GPIOs,
 * unless the peer is still initialized and needs them.
 * EEPROM-write-protected boards are left untouched.
 * NOTE(review): several branch/else lines are elided in this view.
 */
835 static void tg3_frob_aux_power(struct tg3 *tp)
837 struct tg3 *tp_peer = tp;
839 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
843 tp_peer = pci_get_drvdata(tp->pdev_peer);
849 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
850 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
853 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
854 (GRC_LCLCTRL_GPIO_OE0 |
855 GRC_LCLCTRL_GPIO_OE1 |
856 GRC_LCLCTRL_GPIO_OE2 |
857 GRC_LCLCTRL_GPIO_OUTPUT0 |
858 GRC_LCLCTRL_GPIO_OUTPUT1));
862 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
865 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
866 (GRC_LCLCTRL_GPIO_OE0 |
867 GRC_LCLCTRL_GPIO_OE1 |
868 GRC_LCLCTRL_GPIO_OE2 |
869 GRC_LCLCTRL_GPIO_OUTPUT1 |
870 GRC_LCLCTRL_GPIO_OUTPUT2));
873 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
874 (GRC_LCLCTRL_GPIO_OE0 |
875 GRC_LCLCTRL_GPIO_OE1 |
876 GRC_LCLCTRL_GPIO_OE2 |
877 GRC_LCLCTRL_GPIO_OUTPUT0 |
878 GRC_LCLCTRL_GPIO_OUTPUT1 |
879 GRC_LCLCTRL_GPIO_OUTPUT2));
882 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
883 (GRC_LCLCTRL_GPIO_OE0 |
884 GRC_LCLCTRL_GPIO_OE1 |
885 GRC_LCLCTRL_GPIO_OE2 |
886 GRC_LCLCTRL_GPIO_OUTPUT0 |
887 GRC_LCLCTRL_GPIO_OUTPUT1));
/* No WOL: release the GPIOs on chips after 5700/5701, unless the peer
 * port is still up and relying on them. */
891 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
892 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
894 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
897 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
898 (GRC_LCLCTRL_GPIO_OE1 |
899 GRC_LCLCTRL_GPIO_OUTPUT1));
902 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
903 (GRC_LCLCTRL_GPIO_OE1));
906 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907 (GRC_LCLCTRL_GPIO_OE1 |
908 GRC_LCLCTRL_GPIO_OUTPUT1));
914 static int tg3_setup_phy(struct tg3 *, int);
916 #define RESET_KIND_SHUTDOWN 0
917 #define RESET_KIND_INIT 1
918 #define RESET_KIND_SUSPEND 2
920 static void tg3_write_sig_post_reset(struct tg3 *, int);
/* Transition the device to the requested PCI power state.
 * For D0 (the early-return path), it restores PME status/state bits and
 * GRC local control.  For low-power states: masks interrupts, saves the
 * current link config and drops the copper PHY to 10/half autoneg, builds
 * a WOL-capable MAC mode (MII or TBI depending on PHY) with magic-packet
 * enable when PME from D3cold is supported, gates RX/TX clocks per chip
 * generation, frobs aux power for the peer port, writes the new PCI PM
 * control word, and posts the shutdown signature to the firmware.
 * NOTE(review): many lines (switch arms, udelays, returns) are elided in
 * this view.
 */
922 static int tg3_set_power_state(struct tg3 *tp, int state)
925 u16 power_control, power_caps;
928 /* Make sure register accesses (indirect or otherwise)
929 * will function correctly.
931 pci_write_config_dword(tp->pdev,
932 TG3PCI_MISC_HOST_CTRL,
935 pci_read_config_word(tp->pdev,
938 power_control |= PCI_PM_CTRL_PME_STATUS;
939 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
943 pci_write_config_word(tp->pdev,
946 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
964 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
966 tp->dev->name, state);
970 power_control |= PCI_PM_CTRL_PME_ENABLE;
972 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
973 tw32(TG3PCI_MISC_HOST_CTRL,
974 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Save the operating link configuration so it can be restored on resume. */
976 if (tp->link_config.phy_is_low_power == 0) {
977 tp->link_config.phy_is_low_power = 1;
978 tp->link_config.orig_speed = tp->link_config.speed;
979 tp->link_config.orig_duplex = tp->link_config.duplex;
980 tp->link_config.orig_autoneg = tp->link_config.autoneg;
983 if (tp->phy_id != PHY_ID_SERDES) {
984 tp->link_config.speed = SPEED_10;
985 tp->link_config.duplex = DUPLEX_HALF;
986 tp->link_config.autoneg = AUTONEG_ENABLE;
987 tg3_setup_phy(tp, 0);
990 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
992 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
995 if (tp->phy_id != PHY_ID_SERDES) {
996 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
999 mac_mode = MAC_MODE_PORT_MODE_MII;
1001 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1002 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1003 mac_mode |= MAC_MODE_LINK_POLARITY;
1005 mac_mode = MAC_MODE_PORT_MODE_TBI;
1008 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1009 tw32(MAC_LED_CTRL, tp->led_ctrl);
1011 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1012 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1013 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1015 tw32_f(MAC_MODE, mac_mode);
1018 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: strategy differs by chip generation. */
1022 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1023 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1027 base_val = tp->pci_clock_ctrl;
1028 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1029 CLOCK_CTRL_TXCLK_DISABLE);
1031 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1033 CLOCK_CTRL_PWRDOWN_PLL133);
/* NOTE(review): comparing GET_ASIC_REV(...) against the literal 5750
 * instead of ASIC_REV_5750 (used everywhere else in this file) looks
 * like a bug — confirm against a later upstream tg3.c. */
1035 } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == 5750) &&
1036 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1037 u32 newbits1, newbits2;
1039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1041 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1042 CLOCK_CTRL_TXCLK_DISABLE |
1044 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1045 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1047 newbits1 = CLOCK_CTRL_625_CORE;
1048 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1050 newbits1 = CLOCK_CTRL_ALTCLK;
1051 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1054 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1057 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1060 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1061 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1065 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1066 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1067 CLOCK_CTRL_TXCLK_DISABLE |
1068 CLOCK_CTRL_44MHZ_CORE);
1070 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1073 tw32_f(TG3PCI_CLOCK_CTRL,
1074 tp->pci_clock_ctrl | newbits3);
1079 tg3_frob_aux_power(tp);
1081 /* Finally, set the new power state. */
1082 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1084 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Log the current link state: "Link is down", or speed/duplex plus the
 * negotiated TX/RX flow-control settings when the carrier is up.
 */
1089 static void tg3_link_report(struct tg3 *tp)
1091 if (!netif_carrier_ok(tp->dev)) {
1092 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1094 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1096 (tp->link_config.active_speed == SPEED_1000 ?
1098 (tp->link_config.active_speed == SPEED_100 ?
1100 (tp->link_config.active_duplex == DUPLEX_FULL ?
1103 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1106 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1107 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve 802.3x pause negotiation from our advertisement (local_adv) and
 * the link partner's (remote_adv) into TG3_FLAG_RX_PAUSE/TX_PAUSE, then
 * program MAC_RX_MODE/MAC_TX_MODE flow-control enables, writing the
 * registers only when the resolved mode actually changed.
 */
1111 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1113 u32 new_tg3_flags = 0;
1114 u32 old_rx_mode = tp->rx_mode;
1115 u32 old_tx_mode = tp->tx_mode;
/* Standard symmetric/asymmetric pause resolution matrix. */
1117 if (local_adv & ADVERTISE_PAUSE_CAP) {
1118 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1119 if (remote_adv & LPA_PAUSE_CAP)
1121 (TG3_FLAG_RX_PAUSE |
1123 else if (remote_adv & LPA_PAUSE_ASYM)
1125 (TG3_FLAG_RX_PAUSE);
1127 if (remote_adv & LPA_PAUSE_CAP)
1129 (TG3_FLAG_RX_PAUSE |
1132 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1133 if ((remote_adv & LPA_PAUSE_CAP) &&
1134 (remote_adv & LPA_PAUSE_ASYM))
1135 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1138 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1139 tp->tg3_flags |= new_tg3_flags;
1141 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1142 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1144 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1146 if (old_rx_mode != tp->rx_mode) {
1147 tw32_f(MAC_RX_MODE, tp->rx_mode);
1150 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1151 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1153 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1155 if (old_tx_mode != tp->tx_mode) {
1156 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Decode the PHY auxiliary status register's speed/duplex field into
 * (*speed, *duplex); unknown encodings yield SPEED_INVALID/DUPLEX_INVALID.
 * NOTE(review): the 10/100 '*speed = ...' assignments are elided in this
 * view; only the duplex assignments for those arms are visible.
 */
1160 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1162 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1163 case MII_TG3_AUX_STAT_10HALF:
1165 *duplex = DUPLEX_HALF;
1168 case MII_TG3_AUX_STAT_10FULL:
1170 *duplex = DUPLEX_FULL;
1173 case MII_TG3_AUX_STAT_100HALF:
1175 *duplex = DUPLEX_HALF;
1178 case MII_TG3_AUX_STAT_100FULL:
1180 *duplex = DUPLEX_FULL;
1183 case MII_TG3_AUX_STAT_1000HALF:
1184 *speed = SPEED_1000;
1185 *duplex = DUPLEX_HALF;
1188 case MII_TG3_AUX_STAT_1000FULL:
1189 *speed = SPEED_1000;
1190 *duplex = DUPLEX_FULL;
1194 *speed = SPEED_INVALID;
1195 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's advertisement and BMCR according to
 * tp->link_config:
 *  - low-power mode: advertise only 10 (plus 100 if WOL_SPEED_100MB);
 *  - autoneg with no forced speed: advertise everything the chip allows
 *    (masking gigabit on 10/100-only parts), with the 5701 A0/B0
 *    master-mode erratum applied;
 *  - forced speed/duplex: build the matching advertisement and BMCR,
 *    using a loopback trick to wait for link-down before switching.
 * Ends by either forcing BMCR directly (autoneg disabled) or restarting
 * autonegotiation.
 * NOTE(review): declarations and some else/brace lines are elided in this
 * view.
 */
1200 static int tg3_phy_copper_begin(struct tg3 *tp)
1205 if (tp->link_config.phy_is_low_power) {
1206 /* Entering low power mode. Disable gigabit and
1207 * 100baseT advertisements.
1209 tg3_writephy(tp, MII_TG3_CTRL, 0);
1211 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1212 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1213 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1214 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1216 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1217 } else if (tp->link_config.speed == SPEED_INVALID) {
1218 tp->link_config.advertising =
1219 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1220 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1221 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1222 ADVERTISED_Autoneg | ADVERTISED_MII);
1224 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1225 tp->link_config.advertising &=
1226 ~(ADVERTISED_1000baseT_Half |
1227 ADVERTISED_1000baseT_Full);
1229 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1230 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1231 new_adv |= ADVERTISE_10HALF;
1232 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1233 new_adv |= ADVERTISE_10FULL;
1234 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1235 new_adv |= ADVERTISE_100HALF;
1236 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1237 new_adv |= ADVERTISE_100FULL;
1238 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1240 if (tp->link_config.advertising &
1241 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1243 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1244 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1245 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1246 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 erratum: force master mode when advertising gigabit. */
1247 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1248 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1249 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1250 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1251 MII_TG3_CTRL_ENABLE_AS_MASTER);
1252 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1254 tg3_writephy(tp, MII_TG3_CTRL, 0);
1257 /* Asking for a specific link mode. */
1258 if (tp->link_config.speed == SPEED_1000) {
1259 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1260 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1262 if (tp->link_config.duplex == DUPLEX_FULL)
1263 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1265 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1266 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1267 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1268 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1269 MII_TG3_CTRL_ENABLE_AS_MASTER);
1270 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1272 tg3_writephy(tp, MII_TG3_CTRL, 0);
1274 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1275 if (tp->link_config.speed == SPEED_100) {
1276 if (tp->link_config.duplex == DUPLEX_FULL)
1277 new_adv |= ADVERTISE_100FULL;
1279 new_adv |= ADVERTISE_100HALF;
1281 if (tp->link_config.duplex == DUPLEX_FULL)
1282 new_adv |= ADVERTISE_10FULL;
1284 new_adv |= ADVERTISE_10HALF;
1286 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1290 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1291 tp->link_config.speed != SPEED_INVALID) {
1292 u32 bmcr, orig_bmcr;
1294 tp->link_config.active_speed = tp->link_config.speed;
1295 tp->link_config.active_duplex = tp->link_config.duplex;
1298 switch (tp->link_config.speed) {
1304 bmcr |= BMCR_SPEED100;
1308 bmcr |= TG3_BMCR_SPEED1000;
1312 if (tp->link_config.duplex == DUPLEX_FULL)
1313 bmcr |= BMCR_FULLDPLX;
1315 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1316 if (bmcr != orig_bmcr) {
/* Drop into loopback and wait for link to fall before forcing the
 * new speed/duplex. */
1317 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1318 for (i = 0; i < 1500; i++) {
1322 tg3_readphy(tp, MII_BMSR, &tmp);
1323 tg3_readphy(tp, MII_BMSR, &tmp);
1324 if (!(tmp & BMSR_LSTATUS)) {
1329 tg3_writephy(tp, MII_BMCR, bmcr);
1333 tg3_writephy(tp, MII_BMCR,
1334 BMCR_ANENABLE | BMCR_ANRESTART);
/* NOTE(review): this chunk is a subsampled extract of tg3.c -- the
 * embedded upstream line numbers skip values, so declarations, blank
 * lines and closing braces are missing throughout.  Code is documented
 * in place; nothing below has been restructured.
 */
/* tg3_init_5401phy_dsp() - load BCM5401 PHY DSP workaround values.
 *
 * Writes a fixed sequence of DSP address/data register pairs over MDIO
 * (address via MII_TG3_DSP_ADDRESS, data via MII_TG3_DSP_RW_PORT).
 * Each tg3_writephy() result is OR-ed into err so that any single
 * failure propagates to the caller; presumably returns 0 on success
 * (the return statement is not visible in this extract).
 */
1340 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1344 /* Turn off tap power management. */
1345 /* Set Extended packet length bit */
1346 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1348 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1349 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1351 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1352 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1354 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1355 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1357 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1358 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1360 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1361 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* tg3_copper_is_advertising_all() - check the PHY advertisement
 * registers against the full capability set.
 *
 * Reads MII_ADVERTISE and requires all four 10/100 half/full bits; if
 * the device is not restricted to 10/100 (TG3_FLAG_10_100_ONLY clear)
 * it additionally requires both 1000 half/full bits in MII_TG3_CTRL.
 * The early-exit returns and the final success return are not visible
 * in this extract; presumably 0 = not advertising all, nonzero = yes
 * (caller at upstream line 1530 treats 0 as "restart autoneg").
 */
1368 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1370 u32 adv_reg, all_mask;
1372 tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1373 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1374 ADVERTISE_100HALF | ADVERTISE_100FULL);
/* Any missing 10/100 bit means a restricted advertisement. */
1375 if ((adv_reg & all_mask) != all_mask)
1377 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1380 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1381 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1382 MII_TG3_CTRL_ADV_1000_FULL);
1383 if ((tg3_ctrl & all_mask) != all_mask)
/* tg3_setup_copper_phy() - bring up / re-evaluate the copper PHY link.
 *
 * Sequence (as visible in this extract): clear stale MAC status bits,
 * force the MDIO interface to its base mode, apply per-chip PHY
 * workarounds, clear pending PHY interrupts, sample BMSR/aux-status to
 * derive current speed/duplex, validate flow-control advertisement,
 * restart negotiation if the link is considered down, then program
 * MAC_MODE / MAC_EVENT and update the carrier state.
 *
 * NOTE(review): many lines are missing from this extract (locals such
 * as bmsr/bmcr/i/err/dummy, udelay()s, else branches and closing
 * braces), so the comments below describe only what the visible lines
 * establish.
 */
1389 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1391 int current_link_up;
/* Ack any latched link-state/config-change MAC status bits
 * (the tw32_f(MAC_STATUS, ...) call itself is not visible here). */
1400 (MAC_STATUS_SYNC_CHANGED |
1401 MAC_STATUS_CFG_CHANGED |
1402 MAC_STATUS_MI_COMPLETION |
1403 MAC_STATUS_LNKSTATE_CHANGED));
/* Disable MAC auto-polling of the MII while we drive it manually. */
1406 tp->mi_mode = MAC_MI_MODE_BASE;
1407 tw32_f(MAC_MI_MODE, tp->mi_mode);
1410 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1412 /* Some third-party PHYs need to be reset on link going
1415 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1418 netif_carrier_ok(tp->dev)) {
/* BMSR is latched-low; read twice to get the current state. */
1419 tg3_readphy(tp, MII_BMSR, &bmsr);
1420 tg3_readphy(tp, MII_BMSR, &bmsr);
1421 if (!(bmsr & BMSR_LSTATUS))
/* BCM5401-specific bring-up: reload DSP values and poll for link. */
1427 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1428 tg3_readphy(tp, MII_BMSR, &bmsr);
1429 tg3_readphy(tp, MII_BMSR, &bmsr);
1431 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1434 if (!(bmsr & BMSR_LSTATUS)) {
1435 err = tg3_init_5401phy_dsp(tp);
1439 tg3_readphy(tp, MII_BMSR, &bmsr);
1440 for (i = 0; i < 1000; i++) {
1442 tg3_readphy(tp, MII_BMSR, &bmsr);
1443 if (bmsr & BMSR_LSTATUS) {
/* 5401 B0 at gigabit sometimes needs a full PHY reset + DSP reload. */
1449 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1450 !(bmsr & BMSR_LSTATUS) &&
1451 tp->link_config.active_speed == SPEED_1000) {
1452 err = tg3_phy_reset(tp);
1454 err = tg3_init_5401phy_dsp(tp);
1459 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1460 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1461 /* 5701 {A0,B0} CRC bug workaround */
1462 tg3_writephy(tp, 0x15, 0x0a75);
1463 tg3_writephy(tp, 0x1c, 0x8c68);
1464 tg3_writephy(tp, 0x1c, 0x8d68);
1465 tg3_writephy(tp, 0x1c, 0x8c68);
1468 /* Clear pending interrupts... */
1469 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1470 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
/* Unmask only the link-change interrupt when MI interrupts are used;
 * otherwise mask everything (~0). */
1472 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1473 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
1475 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1479 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1480 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1481 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1483 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
/* Assume link down until proven otherwise below. */
1486 current_link_up = 0;
1487 current_speed = SPEED_INVALID;
1488 current_duplex = DUPLEX_INVALID;
/* Poll BMSR (latched) up to 100 times waiting for link. */
1491 for (i = 0; i < 100; i++) {
1492 tg3_readphy(tp, MII_BMSR, &bmsr);
1493 tg3_readphy(tp, MII_BMSR, &bmsr);
1494 if (bmsr & BMSR_LSTATUS)
1499 if (bmsr & BMSR_LSTATUS) {
/* Wait for a stable aux-status value, then decode speed/duplex
 * (tg3_aux_stat_to_speed_duplex output args not visible here). */
1502 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1503 for (i = 0; i < 2000; i++) {
1505 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1510 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* BMCR can read as 0 or 0x7fff transiently; retry until sane. */
1515 for (i = 0; i < 200; i++) {
1516 tg3_readphy(tp, MII_BMCR, &bmcr);
1517 tg3_readphy(tp, MII_BMCR, &bmcr);
1518 if (bmcr && bmcr != 0x7fff)
1523 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1524 if (bmcr & BMCR_ANENABLE) {
1525 current_link_up = 1;
1527 /* Force autoneg restart if we are exiting
1530 if (!tg3_copper_is_advertising_all(tp))
1531 current_link_up = 0;
1533 current_link_up = 0;
/* Forced mode: link counts as up only if the forced speed/duplex
 * match what the PHY actually negotiated. */
1536 if (!(bmcr & BMCR_ANENABLE) &&
1537 tp->link_config.speed == current_speed &&
1538 tp->link_config.duplex == current_duplex) {
1539 current_link_up = 1;
1541 current_link_up = 0;
1545 tp->link_config.active_speed = current_speed;
1546 tp->link_config.active_duplex = current_duplex;
/* Full-duplex autoneg link: sanity-check pause advertisement and
 * configure flow control from local/remote pause bits. */
1549 if (current_link_up == 1 &&
1550 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1551 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1552 u32 local_adv, remote_adv;
1554 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1555 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1557 tg3_readphy(tp, MII_LPA, &remote_adv);
1558 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1560 /* If we are not advertising full pause capability,
1561 * something is wrong. Bring the link down and reconfigure.
1563 if (local_adv != ADVERTISE_PAUSE_CAP) {
1564 current_link_up = 0;
1566 tg3_setup_flow_control(tp, local_adv, remote_adv);
/* Link considered down: restart the copper autoneg sequence and
 * re-sample BMSR in case it came up immediately. */
1570 if (current_link_up == 0) {
1573 tg3_phy_copper_begin(tp);
1575 tg3_readphy(tp, MII_BMSR, &tmp);
1576 tg3_readphy(tp, MII_BMSR, &tmp);
1577 if (tmp & BMSR_LSTATUS)
1578 current_link_up = 1;
/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
1581 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1582 if (current_link_up == 1) {
1583 if (tp->link_config.active_speed == SPEED_100 ||
1584 tp->link_config.active_speed == SPEED_10)
1585 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1587 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1589 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1591 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1592 if (tp->link_config.active_duplex == DUPLEX_HALF)
1593 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
/* Link-polarity handling differs on 5700 vs later chips. */
1595 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1597 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1598 (current_link_up == 1 &&
1599 tp->link_config.active_speed == SPEED_10))
1600 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1602 if (current_link_up == 1)
1603 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1606 /* ??? Without this setting Netgear GA302T PHY does not
1607 * ??? send/receive packets...
1609 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1610 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1611 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1612 tw32_f(MAC_MI_MODE, tp->mi_mode);
1616 tw32_f(MAC_MODE, tp->mac_mode);
/* Link changes are either polled via timer or signalled via the
 * MAC_EVENT link-state-changed interrupt. */
1619 if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
1620 /* Polled via timer. */
1621 tw32_f(MAC_EVENT, 0);
1623 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 gigabit on a fast PCI/PCI-X bus: ack status changes and poke
 * the firmware mailbox (the tw32/tg3_write_mem calls around these
 * argument lines are not visible in this extract). */
1627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1628 current_link_up == 1 &&
1629 tp->link_config.active_speed == SPEED_1000 &&
1630 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1631 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1634 (MAC_STATUS_SYNC_CHANGED |
1635 MAC_STATUS_CFG_CHANGED));
1638 NIC_SRAM_FIRMWARE_MBOX,
1639 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
/* Propagate the computed link state to the net stack and log it. */
1642 if (current_link_up != netif_carrier_ok(tp->dev)) {
1643 if (current_link_up)
1644 netif_carrier_on(tp->dev);
1646 netif_carrier_off(tp->dev);
1647 tg3_link_report(tp);
/* struct tg3_fiber_aneginfo - software state for the 1000BASE-X
 * (IEEE 802.3 Clause 37 style) autonegotiation state machine driven by
 * tg3_fiber_aneg_smachine().  The ANEG_STATE_* values enumerate the
 * states; MR_* bits mirror the management-register style flags (local
 * enable/restart plus link-partner advertised abilities); ANEG_CFG_*
 * are bit positions within the received/transmitted config words.
 * NOTE(review): some members/constants (e.g. ANEG_DONE, the cur_time
 * declaration's siblings) fall in lines missing from this extract.
 */
1653 struct tg3_fiber_aneginfo {
/* State-machine states (stored in ->state). */
1655 #define ANEG_STATE_UNKNOWN 0
1656 #define ANEG_STATE_AN_ENABLE 1
1657 #define ANEG_STATE_RESTART_INIT 2
1658 #define ANEG_STATE_RESTART 3
1659 #define ANEG_STATE_DISABLE_LINK_OK 4
1660 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1661 #define ANEG_STATE_ABILITY_DETECT 6
1662 #define ANEG_STATE_ACK_DETECT_INIT 7
1663 #define ANEG_STATE_ACK_DETECT 8
1664 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1665 #define ANEG_STATE_COMPLETE_ACK 10
1666 #define ANEG_STATE_IDLE_DETECT_INIT 11
1667 #define ANEG_STATE_IDLE_DETECT 12
1668 #define ANEG_STATE_LINK_OK 13
1669 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1670 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Flag bits (stored in ->flags): local control/result bits plus
 * link-partner (LP) advertised abilities decoded from rxconfig. */
1673 #define MR_AN_ENABLE 0x00000001
1674 #define MR_RESTART_AN 0x00000002
1675 #define MR_AN_COMPLETE 0x00000004
1676 #define MR_PAGE_RX 0x00000008
1677 #define MR_NP_LOADED 0x00000010
1678 #define MR_TOGGLE_TX 0x00000020
1679 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1680 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1681 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1682 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1683 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1684 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1685 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1686 #define MR_TOGGLE_RX 0x00002000
1687 #define MR_NP_RX 0x00004000
1689 #define MR_LINK_OK 0x80000000
/* Timestamps in state-machine ticks (see ANEG_STATE_SETTLE_TIME). */
1691 unsigned long link_time, cur_time;
/* Last received config word and how long it has been stable. */
1693 u32 ability_match_cfg;
1694 int ability_match_count;
1696 char ability_match, idle_match, ack_match;
/* Raw transmitted/received 16-bit config words. */
1698 u32 txconfig, rxconfig;
/* Bit layout of the config words exchanged during negotiation. */
1699 #define ANEG_CFG_NP 0x00000080
1700 #define ANEG_CFG_ACK 0x00000040
1701 #define ANEG_CFG_RF2 0x00000020
1702 #define ANEG_CFG_RF1 0x00000010
1703 #define ANEG_CFG_PS2 0x00000001
1704 #define ANEG_CFG_PS1 0x00008000
1705 #define ANEG_CFG_HD 0x00004000
1706 #define ANEG_CFG_FD 0x00002000
1707 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes from tg3_fiber_aneg_smachine(); ANEG_DONE presumably
 * defined on a line missing from this extract. */
1712 #define ANEG_TIMER_ENAB 2
1713 #define ANEG_FAILED -1
/* Settle time between state transitions, in cur_time ticks. */
1715 #define ANEG_STATE_SETTLE_TIME 10000
/* tg3_fiber_aneg_smachine() - advance the software fiber
 * autonegotiation state machine by one step.
 *
 * Called repeatedly (see fiber_autoneg()'s 195000-iteration loop) with
 * the persistent state in *ap.  Each call first samples the received
 * config word from MAC_RX_AUTO_NEG and updates the ability/ack match
 * tracking, then dispatches on ap->state.  Returns ANEG_TIMER_ENAB,
 * ANEG_FAILED, or (presumably, from missing lines) ANEG_DONE/ANEG_OK.
 *
 * NOTE(review): several case bodies, `break`s and return statements
 * fall on lines missing from this extract; comments describe only the
 * visible transitions.
 */
1717 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1718 struct tg3_fiber_aneginfo *ap)
1720 unsigned long delta;
/* First entry: reset the ability-match tracking. */
1724 if (ap->state == ANEG_STATE_UNKNOWN) {
1728 ap->ability_match_cfg = 0;
1729 ap->ability_match_count = 0;
1730 ap->ability_match = 0;
/* Sample the incoming config word; "ability match" is declared once
 * the same nonzero word has been seen more than once in a row. */
1736 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1737 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1739 if (rx_cfg_reg != ap->ability_match_cfg) {
1740 ap->ability_match_cfg = rx_cfg_reg;
1741 ap->ability_match = 0;
1742 ap->ability_match_count = 0;
1744 if (++ap->ability_match_count > 1) {
1745 ap->ability_match = 1;
1746 ap->ability_match_cfg = rx_cfg_reg;
1749 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config received: clear match state (else branch; the idle/ack
 * match updates nearby are on missing lines). */
1757 ap->ability_match_cfg = 0;
1758 ap->ability_match_count = 0;
1759 ap->ability_match = 0;
1765 ap->rxconfig = rx_cfg_reg;
/* Main state dispatch. */
1769 case ANEG_STATE_UNKNOWN:
1770 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1771 ap->state = ANEG_STATE_AN_ENABLE;
1774 case ANEG_STATE_AN_ENABLE:
1775 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1776 if (ap->flags & MR_AN_ENABLE) {
1779 ap->ability_match_cfg = 0;
1780 ap->ability_match_count = 0;
1781 ap->ability_match = 0;
1785 ap->state = ANEG_STATE_RESTART_INIT;
1787 ap->state = ANEG_STATE_DISABLE_LINK_OK;
/* Restart: send an all-zero config word with SEND_CONFIGS set,
 * then wait out the settle time in ANEG_STATE_RESTART. */
1791 case ANEG_STATE_RESTART_INIT:
1792 ap->link_time = ap->cur_time;
1793 ap->flags &= ~(MR_NP_LOADED);
1795 tw32(MAC_TX_AUTO_NEG, 0);
1796 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1797 tw32_f(MAC_MODE, tp->mac_mode);
1800 ret = ANEG_TIMER_ENAB;
1801 ap->state = ANEG_STATE_RESTART;
1804 case ANEG_STATE_RESTART:
1805 delta = ap->cur_time - ap->link_time;
1806 if (delta > ANEG_STATE_SETTLE_TIME) {
1807 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1809 ret = ANEG_TIMER_ENAB;
1813 case ANEG_STATE_DISABLE_LINK_OK:
/* Ability detect: advertise full-duplex + symmetric pause and wait
 * for a stable nonzero config word from the partner. */
1817 case ANEG_STATE_ABILITY_DETECT_INIT:
1818 ap->flags &= ~(MR_TOGGLE_TX);
1819 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1820 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1821 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1822 tw32_f(MAC_MODE, tp->mac_mode);
1825 ap->state = ANEG_STATE_ABILITY_DETECT;
1828 case ANEG_STATE_ABILITY_DETECT:
1829 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1830 ap->state = ANEG_STATE_ACK_DETECT_INIT;
/* Ack detect: set the ACK bit in our transmitted config word. */
1834 case ANEG_STATE_ACK_DETECT_INIT:
1835 ap->txconfig |= ANEG_CFG_ACK;
1836 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1837 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1838 tw32_f(MAC_MODE, tp->mac_mode);
1841 ap->state = ANEG_STATE_ACK_DETECT;
1844 case ANEG_STATE_ACK_DETECT:
1845 if (ap->ack_match != 0) {
1846 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1847 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1848 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1850 ap->state = ANEG_STATE_AN_ENABLE;
1852 } else if (ap->ability_match != 0 &&
1853 ap->rxconfig == 0) {
1854 ap->state = ANEG_STATE_AN_ENABLE;
/* Complete-ack: reject invalid config words, then decode the link
 * partner's advertised abilities into MR_LP_* flags. */
1858 case ANEG_STATE_COMPLETE_ACK_INIT:
1859 if (ap->rxconfig & ANEG_CFG_INVAL) {
1863 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1864 MR_LP_ADV_HALF_DUPLEX |
1865 MR_LP_ADV_SYM_PAUSE |
1866 MR_LP_ADV_ASYM_PAUSE |
1867 MR_LP_ADV_REMOTE_FAULT1 |
1868 MR_LP_ADV_REMOTE_FAULT2 |
1869 MR_LP_ADV_NEXT_PAGE |
1872 if (ap->rxconfig & ANEG_CFG_FD)
1873 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1874 if (ap->rxconfig & ANEG_CFG_HD)
1875 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1876 if (ap->rxconfig & ANEG_CFG_PS1)
1877 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1878 if (ap->rxconfig & ANEG_CFG_PS2)
1879 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1880 if (ap->rxconfig & ANEG_CFG_RF1)
1881 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1882 if (ap->rxconfig & ANEG_CFG_RF2)
1883 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1884 if (ap->rxconfig & ANEG_CFG_NP)
1885 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1887 ap->link_time = ap->cur_time;
1889 ap->flags ^= (MR_TOGGLE_TX);
1890 if (ap->rxconfig & 0x0008)
1891 ap->flags |= MR_TOGGLE_RX;
1892 if (ap->rxconfig & ANEG_CFG_NP)
1893 ap->flags |= MR_NP_RX;
1894 ap->flags |= MR_PAGE_RX;
1896 ap->state = ANEG_STATE_COMPLETE_ACK;
1897 ret = ANEG_TIMER_ENAB;
1900 case ANEG_STATE_COMPLETE_ACK:
/* Partner restarted (stable zero config): renegotiate. */
1901 if (ap->ability_match != 0 &&
1902 ap->rxconfig == 0) {
1903 ap->state = ANEG_STATE_AN_ENABLE;
1906 delta = ap->cur_time - ap->link_time;
1907 if (delta > ANEG_STATE_SETTLE_TIME) {
1908 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1909 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1911 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1912 !(ap->flags & MR_NP_RX)) {
1913 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
/* Idle detect: stop sending configs and wait for idle on the wire. */
1921 case ANEG_STATE_IDLE_DETECT_INIT:
1922 ap->link_time = ap->cur_time;
1923 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1924 tw32_f(MAC_MODE, tp->mac_mode);
1927 ap->state = ANEG_STATE_IDLE_DETECT;
1928 ret = ANEG_TIMER_ENAB;
1931 case ANEG_STATE_IDLE_DETECT:
1932 if (ap->ability_match != 0 &&
1933 ap->rxconfig == 0) {
1934 ap->state = ANEG_STATE_AN_ENABLE;
1937 delta = ap->cur_time - ap->link_time;
1938 if (delta > ANEG_STATE_SETTLE_TIME) {
1939 /* XXX another gem from the Broadcom driver :( */
1940 ap->state = ANEG_STATE_LINK_OK;
1944 case ANEG_STATE_LINK_OK:
1945 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
/* Next-page support is intentionally unimplemented. */
1949 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1950 /* ??? unimplemented */
1953 case ANEG_STATE_NEXT_PAGE_WAIT:
1954 /* ??? unimplemented */
/* fiber_autoneg() - run fiber autonegotiation, in hardware or software.
 *
 * With TG3_FLG2_HW_AUTONEG the SERDES hardware does the negotiation;
 * this function just decodes SG_DIG_STATUS into MR_LP_ADV_* pause
 * flags and checks for error-free completion.  Otherwise it drives the
 * software state machine (tg3_fiber_aneg_smachine) for up to 195000
 * ticks, then reports the resulting flags.  Returns nonzero on
 * successful negotiation (exact return statements are on lines missing
 * from this extract).
 */
1965 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
1969 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
1972 dig_status = tr32(SG_DIG_STATUS);
/* Translate hardware partner-pause bits to the MR_* convention. */
1974 if (dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
1975 *flags |= MR_LP_ADV_ASYM_PAUSE;
1976 if (dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
1977 *flags |= MR_LP_ADV_SYM_PAUSE;
1979 if ((dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1980 !(dig_status & (SG_DIG_AUTONEG_ERROR |
1981 SG_DIG_PARTNER_FAULT_MASK)))
/* Software path: prime the MAC to send config words in GMII mode. */
1984 struct tg3_fiber_aneginfo aninfo;
1985 int status = ANEG_FAILED;
1989 tw32_f(MAC_TX_AUTO_NEG, 0);
1991 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1992 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1995 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1998 memset(&aninfo, 0, sizeof(aninfo));
1999 aninfo.flags |= MR_AN_ENABLE;
2000 aninfo.state = ANEG_STATE_UNKNOWN;
2001 aninfo.cur_time = 0;
/* Step the state machine until done/failed or the tick budget runs
 * out (~195 ms; cur_time advance happens on lines not shown). */
2003 while (++tick < 195000) {
2004 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2005 if (status == ANEG_DONE || status == ANEG_FAILED)
2011 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2012 tw32_f(MAC_MODE, tp->mac_mode);
2015 *flags = aninfo.flags;
2017 if (status == ANEG_DONE &&
2018 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2019 MR_LP_ADV_FULL_DUPLEX)))
/* tg3_setup_fiber_phy() - bring up / re-evaluate the fiber (TBI/SERDES)
 * link.
 *
 * Records the previous pause/speed/duplex configuration, forces the
 * MAC into TBI port mode, optionally waits for hardware autoneg,
 * resets and configures the SERDES PHY when needed, runs
 * fiber_autoneg() when autoneg is enabled (else forces 1000FD), then
 * updates LEDs, carrier state and reports link changes.  A fiber link,
 * when up, is always 1000 Mb/s full duplex.
 *
 * NOTE(review): locals (i, flags, tick, orig_pause_cfg lvalue, ...),
 * udelay()s and several braces are on lines missing from this extract.
 */
2026 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2029 u16 orig_active_speed;
2030 u8 orig_active_duplex;
2031 int current_link_up;
/* Snapshot previous flow-control/speed/duplex so we can report only
 * real changes at the end. */
2035 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2036 TG3_FLAG_TX_PAUSE));
2037 orig_active_speed = tp->link_config.active_speed;
2038 orig_active_duplex = tp->link_config.active_duplex;
/* Fiber always runs the MAC in TBI mode, full duplex. */
2040 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2041 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2042 tw32_f(MAC_MODE, tp->mac_mode);
2045 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
2046 /* Allow time for the hardware to auto-negotiate (195ms) */
2047 unsigned int tick = 0;
2049 while (++tick < 195000) {
2050 if (tr32(SG_DIG_STATUS) & SG_DIG_AUTONEG_COMPLETE)
2055 printk(KERN_INFO PFX "%s: HW autoneg failed !\n",
2059 /* Reset when initting first time or we have a link. */
2060 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
2061 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2062 /* Set PLL lock range. */
2063 tg3_writephy(tp, 0x16, 0x8007);
2066 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2068 /* Wait for reset to complete. */
2069 /* XXX schedule_timeout() ... */
2070 for (i = 0; i < 500; i++)
2073 /* Config mode; select PMA/Ch 1 regs. */
2074 tg3_writephy(tp, 0x10, 0x8411);
2076 /* Enable auto-lock and comdet, select txclk for tx. */
2077 tg3_writephy(tp, 0x11, 0x0a10);
2079 tg3_writephy(tp, 0x18, 0x00a0);
2080 tg3_writephy(tp, 0x16, 0x41ff);
2082 /* Assert and deassert POR. */
2083 tg3_writephy(tp, 0x13, 0x0400);
2085 tg3_writephy(tp, 0x13, 0x0000);
2087 tg3_writephy(tp, 0x11, 0x0a50);
2089 tg3_writephy(tp, 0x11, 0x0a10);
2091 /* Wait for signal to stabilize */
2092 /* XXX schedule_timeout() ... */
2093 for (i = 0; i < 15000; i++)
2096 /* Deselect the channel register so we can read the PHYID
2099 tg3_writephy(tp, 0x10, 0x8011);
2102 /* Enable link change interrupt unless serdes polling. */
2103 if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
2104 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2106 tw32_f(MAC_EVENT, 0);
2109 current_link_up = 0;
/* Only attempt negotiation once the PCS reports symbol sync. */
2110 if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
2111 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2114 if (fiber_autoneg(tp, &flags)) {
2115 u32 local_adv, remote_adv;
2117 local_adv = ADVERTISE_PAUSE_CAP;
2119 if (flags & MR_LP_ADV_SYM_PAUSE)
2120 remote_adv |= LPA_PAUSE_CAP;
2121 if (flags & MR_LP_ADV_ASYM_PAUSE)
2122 remote_adv |= LPA_PAUSE_ASYM;
2124 tg3_setup_flow_control(tp, local_adv, remote_adv);
2127 TG3_FLAG_GOT_SERDES_FLOWCTL;
2128 current_link_up = 1;
/* Wait (up to 60 iterations) for SYNC/CFG-changed to stop
 * toggling before trusting PCS_SYNCED for a link-up verdict. */
2130 for (i = 0; i < 60; i++) {
2133 (MAC_STATUS_SYNC_CHANGED |
2134 MAC_STATUS_CFG_CHANGED));
2136 if ((tr32(MAC_STATUS) &
2137 (MAC_STATUS_SYNC_CHANGED |
2138 MAC_STATUS_CFG_CHANGED)) == 0)
2141 if (current_link_up == 0 &&
2142 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2143 current_link_up = 1;
2146 /* Forcing 1000FD link up. */
2147 current_link_up = 1;
2148 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2151 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2153 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2154 tw32_f(MAC_MODE, tp->mac_mode);
/* Clear the link-changed bit in the shared status block. */
2157 tp->hw_status->status =
2158 (SD_STATUS_UPDATED |
2159 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2161 for (i = 0; i < 100; i++) {
2164 (MAC_STATUS_SYNC_CHANGED |
2165 MAC_STATUS_CFG_CHANGED));
2167 if ((tr32(MAC_STATUS) &
2168 (MAC_STATUS_SYNC_CHANGED |
2169 MAC_STATUS_CFG_CHANGED)) == 0)
2173 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
2174 current_link_up = 0;
/* Fiber link up is always 1000FD; drive the link LED accordingly. */
2176 if (current_link_up == 1) {
2177 tp->link_config.active_speed = SPEED_1000;
2178 tp->link_config.active_duplex = DUPLEX_FULL;
2179 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2180 LED_CTRL_LNKLED_OVERRIDE |
2181 LED_CTRL_1000MBPS_ON));
2183 tp->link_config.active_speed = SPEED_INVALID;
2184 tp->link_config.active_duplex = DUPLEX_INVALID;
2185 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2186 LED_CTRL_LNKLED_OVERRIDE |
2187 LED_CTRL_TRAFFIC_OVERRIDE));
2190 if (current_link_up != netif_carrier_ok(tp->dev)) {
2191 if (current_link_up)
2192 netif_carrier_on(tp->dev);
2194 netif_carrier_off(tp->dev);
2195 tg3_link_report(tp);
/* Even without a carrier transition, report if pause/speed/duplex
 * configuration changed since entry. */
2198 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2200 if (orig_pause_cfg != now_pause_cfg ||
2201 orig_active_speed != tp->link_config.active_speed ||
2202 orig_active_duplex != tp->link_config.active_duplex)
2203 tg3_link_report(tp);
2206 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
2207 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
2209 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
2210 tw32_f(MAC_MODE, tp->mac_mode);
/* tg3_setup_phy() - common link-setup entry point.
 *
 * Dispatches to the fiber or copper setup routine based on phy_id,
 * then programs MAC_TX_LENGTHS (a longer slot time, 0xff, is required
 * for 1000 Mb/s half duplex; 32 otherwise) and enables or disables
 * host-coalescing statistics ticks on chips that have them (not
 * 5705/5750) depending on carrier state.  Returns the err from the
 * per-media setup routine (return statement not visible here).
 */
2218 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2222 if (tp->phy_id == PHY_ID_SERDES) {
2223 err = tg3_setup_fiber_phy(tp, force_reset);
2225 err = tg3_setup_copper_phy(tp, force_reset);
/* 1000HD needs an extended slot time (0xff) per the hardware spec. */
2228 if (tp->link_config.active_speed == SPEED_1000 &&
2229 tp->link_config.active_duplex == DUPLEX_HALF)
2230 tw32(MAC_TX_LENGTHS,
2231 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2232 (6 << TX_LENGTHS_IPG_SHIFT) |
2233 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2235 tw32(MAC_TX_LENGTHS,
2236 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2237 (6 << TX_LENGTHS_IPG_SHIFT) |
2238 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
/* 5705/5750 lack this coalescing register; others tick stats only
 * while the link is up. */
2240 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2241 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2242 if (netif_carrier_ok(tp->dev)) {
2243 tw32(HOSTCC_STAT_COAL_TICKS,
2244 DEFAULT_STAT_COAL_TICKS);
2246 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2253 /* Tigon3 never reports partial packet sends. So we do not
2254 * need special logic to handle SKBs that have not had all
2255 * of their frags sent yet, like SunGEM does.
/* tg3_tx() - reclaim completed TX descriptors.
 *
 * Walks the TX ring from the software consumer (tp->tx_cons) to the
 * hardware consumer reported in the status block, unmapping the
 * head-buffer DMA and each fragment page, freeing the skb (irq-safe
 * variant), and finally waking the queue if it was stopped and enough
 * descriptors are free again.  Caller must hold the TX lock
 * (tg3_poll takes tp->tx_lock around the call).
 */
2257 static void tg3_tx(struct tg3 *tp)
2259 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2260 u32 sw_idx = tp->tx_cons;
2262 while (sw_idx != hw_idx) {
2263 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2264 struct sk_buff *skb = ri->skb;
/* A completed slot with no skb indicates ring corruption
 * (the error-handling statement is on a line missing here). */
2267 if (unlikely(skb == NULL))
2270 pci_unmap_single(tp->pdev,
2271 pci_unmap_addr(ri, mapping),
2277 sw_idx = NEXT_TX(sw_idx);
/* One ring entry per fragment follows the head entry. */
2279 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2280 if (unlikely(sw_idx == hw_idx))
2283 ri = &tp->tx_buffers[sw_idx];
2284 if (unlikely(ri->skb != NULL))
2287 pci_unmap_page(tp->pdev,
2288 pci_unmap_addr(ri, mapping),
2289 skb_shinfo(skb)->frags[i].size,
2292 sw_idx = NEXT_TX(sw_idx);
2295 dev_kfree_skb_irq(skb);
2298 tp->tx_cons = sw_idx;
2300 if (netif_queue_stopped(tp->dev) &&
2301 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2302 netif_wake_queue(tp->dev);
2305 /* Returns size of skb allocated or < 0 on error.
2307 * We only need to fill in the address because the other members
2308 * of the RX descriptor are invariant, see tg3_init_rings.
2310 * Note the purposeful assymetry of cpu vs. chip accesses. For
2311 * posting buffers we only dirty the first cache line of the RX
2312 * descriptor (containing the address). Whereas for the RX status
2313 * buffers the cpu only reads the last cacheline of the RX descriptor
2314 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* tg3_alloc_rx_skb() - allocate and map a fresh RX buffer into the
 * standard or jumbo producer ring slot selected by opaque_key and
 * dest_idx_unmasked; if src_idx >= 0 (presumably -- the guard line is
 * missing from this extract) the source slot's skb pointer is cleared
 * since its buffer is being replaced.  On success the descriptor's
 * 64-bit DMA address is split into addr_hi/addr_lo.
 */
2316 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2317 int src_idx, u32 dest_idx_unmasked)
2319 struct tg3_rx_buffer_desc *desc;
2320 struct ring_info *map, *src_map;
2321 struct sk_buff *skb;
2323 int skb_size, dest_idx;
/* Pick ring, descriptor, bookkeeping slot and buffer size. */
2326 switch (opaque_key) {
2327 case RXD_OPAQUE_RING_STD:
2328 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2329 desc = &tp->rx_std[dest_idx];
2330 map = &tp->rx_std_buffers[dest_idx];
2332 src_map = &tp->rx_std_buffers[src_idx];
2333 skb_size = RX_PKT_BUF_SZ;
2336 case RXD_OPAQUE_RING_JUMBO:
2337 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2338 desc = &tp->rx_jumbo[dest_idx];
2339 map = &tp->rx_jumbo_buffers[dest_idx];
2341 src_map = &tp->rx_jumbo_buffers[src_idx];
2342 skb_size = RX_JUMBO_PKT_BUF_SZ;
2349 /* Do not overwrite any of the map or rp information
2350 * until we are sure we can commit to a new buffer.
2352 * Callers depend upon this behavior and assume that
2353 * we leave everything unchanged if we fail.
2355 skb = dev_alloc_skb(skb_size);
/* rx_offset aligns the IP header; it is excluded from the mapping. */
2360 skb_reserve(skb, tp->rx_offset);
2362 mapping = pci_map_single(tp->pdev, skb->data,
2363 skb_size - tp->rx_offset,
2364 PCI_DMA_FROMDEVICE);
2367 pci_unmap_addr_set(map, mapping, mapping);
2369 if (src_map != NULL)
2370 src_map->skb = NULL;
2372 desc->addr_hi = ((u64)mapping >> 32);
2373 desc->addr_lo = ((u64)mapping & 0xffffffff);
2378 /* We only need to move over in the address because the other
2379 * members of the RX descriptor are invariant. See notes above
2380 * tg3_alloc_rx_skb for full details.
/* tg3_recycle_rx() - repost an already-mapped RX buffer.
 *
 * Moves the skb pointer, the stored DMA unmap address, and the
 * descriptor address words from the source slot to the destination
 * producer slot (standard or jumbo ring per opaque_key), then clears
 * the source slot's skb.  Used when a packet is dropped or copied so
 * the original buffer can be handed straight back to the chip.
 */
2382 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2383 int src_idx, u32 dest_idx_unmasked)
2385 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2386 struct ring_info *src_map, *dest_map;
2389 switch (opaque_key) {
2390 case RXD_OPAQUE_RING_STD:
2391 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2392 dest_desc = &tp->rx_std[dest_idx];
2393 dest_map = &tp->rx_std_buffers[dest_idx];
2394 src_desc = &tp->rx_std[src_idx];
2395 src_map = &tp->rx_std_buffers[src_idx];
2398 case RXD_OPAQUE_RING_JUMBO:
2399 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2400 dest_desc = &tp->rx_jumbo[dest_idx];
2401 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2402 src_desc = &tp->rx_jumbo[src_idx];
2403 src_map = &tp->rx_jumbo_buffers[src_idx];
/* Transfer ownership of the buffer to the destination slot. */
2410 dest_map->skb = src_map->skb;
2411 pci_unmap_addr_set(dest_map, mapping,
2412 pci_unmap_addr(src_map, mapping));
2413 dest_desc->addr_hi = src_desc->addr_hi;
2414 dest_desc->addr_lo = src_desc->addr_lo;
2416 src_map->skb = NULL;
/* tg3_vlan_rx() - hand a received skb to the stack with its
 * hardware-extracted VLAN tag, via the VLAN acceleration path.
 * Compiled only when 802.1Q support is configured.
 */
2419 #if TG3_VLAN_TAG_USED
2420 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2422 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2426 /* The RX ring scheme is composed of multiple rings which post fresh
2427 * buffers to the chip, and one special ring the chip uses to report
2428 * status back to the host.
2430 * The special ring reports the status of received packets to the
2431 * host. The chip does not write into the original descriptor the
2432 * RX buffer was obtained from. The chip simply takes the original
2433 * descriptor as provided by the host, updates the status and length
2434 * field, then writes this into the next status ring entry.
2436 * Each ring the host uses to post buffers to the chip is described
2437 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2438 * it is first placed into the on-chip ram. When the packet's length
2439 * is known, it walks down the TG3_BDINFO entries to select the ring.
2440 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2441 * which is within the range of the new packet's length is chosen.
2443 * The "separate ring for rx status" scheme may sound queer, but it makes
2444 * sense from a cache coherency perspective. If only the host writes
2445 * to the buffer post rings, and only the chip writes to the rx status
2446 * rings, then cache lines never move beyond shared-modified state.
2447 * If both the host and chip were to write into the same ring, cache line
2448 * eviction could occur since both entities want it in an exclusive state.
/* tg3_rx() - NAPI receive processing, bounded by budget.
 *
 * Walks the RX return (status) ring from tp->rx_rcb_ptr to the
 * hardware producer index, for each entry: locates the posted buffer
 * via the opaque cookie, drops errored frames (recycling the buffer),
 * and for good frames either unmaps and passes the original skb up
 * (len > RX_COPY_THRESHOLD) or copies into a small fresh skb and
 * recycles the original.  Checksum offload and VLAN acceleration are
 * applied before netif_receive_skb().  Finally acks the return ring
 * and refills whichever producer rings were consumed from.  Returns
 * the number of packets processed (the `received` accounting lines
 * are missing from this extract -- presumably work_done).
 */
2450 static int tg3_rx(struct tg3 *tp, int budget)
2453 u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2457 hw_idx = tp->hw_status->idx[0].rx_producer;
2459 * We need to order the read of hw_idx and the read of
2460 * the opaque cookie.
2463 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2466 while (sw_idx != hw_idx && budget > 0) {
2467 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2469 struct sk_buff *skb;
2470 dma_addr_t dma_addr;
2471 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie encodes which ring (std/jumbo) and which slot
 * the buffer came from. */
2473 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2474 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2475 if (opaque_key == RXD_OPAQUE_RING_STD) {
2476 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2478 skb = tp->rx_std_buffers[desc_idx].skb;
2479 post_ptr = &tp->rx_std_ptr;
2480 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2481 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2483 skb = tp->rx_jumbo_buffers[desc_idx].skb;
2484 post_ptr = &tp->rx_jumbo_ptr;
2487 goto next_pkt_nopost;
2490 work_mask |= opaque_key;
/* Errored frame (except the tolerated odd-nibble MII case):
 * recycle the buffer and count the drop. */
2492 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2493 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2495 tg3_recycle_rx(tp, opaque_key,
2496 desc_idx, *post_ptr);
2498 /* Other statistics kept track of by card. */
2499 tp->net_stats.rx_dropped++;
2503 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
/* Large frame: hand the original buffer up and post a new one. */
2505 if (len > RX_COPY_THRESHOLD) {
2508 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2509 desc_idx, *post_ptr);
2513 pci_unmap_single(tp->pdev, dma_addr,
2514 skb_size - tp->rx_offset,
2515 PCI_DMA_FROMDEVICE);
/* Small frame: copy into a fresh skb (2-byte reserve keeps the IP
 * header aligned) and recycle the original buffer. */
2519 struct sk_buff *copy_skb;
2521 tg3_recycle_rx(tp, opaque_key,
2522 desc_idx, *post_ptr);
2524 copy_skb = dev_alloc_skb(len + 2);
2525 if (copy_skb == NULL)
2526 goto drop_it_no_recycle;
2528 copy_skb->dev = tp->dev;
2529 skb_reserve(copy_skb, 2);
2530 skb_put(copy_skb, len);
2531 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2532 memcpy(copy_skb->data, skb->data, len);
2533 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2535 /* We'll reuse the original ring buffer. */
/* Accept the hardware TCP/UDP checksum only when it is 0xffff. */
2539 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2540 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2541 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2542 >> RXD_TCPCSUM_SHIFT) == 0xffff))
2543 skb->ip_summed = CHECKSUM_UNNECESSARY;
2545 skb->ip_summed = CHECKSUM_NONE;
2547 skb->protocol = eth_type_trans(skb, tp->dev);
2548 #if TG3_VLAN_TAG_USED
2549 if (tp->vlgrp != NULL &&
2550 desc->type_flags & RXD_FLAG_VLAN) {
2551 tg3_vlan_rx(tp, skb,
2552 desc->err_vlan & RXD_VLAN_MASK);
2555 netif_receive_skb(skb);
2557 tp->dev->last_rx = jiffies;
2565 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2568 /* ACK the status ring. */
2569 tp->rx_rcb_ptr = rx_rcb_ptr;
2570 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2571 (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2573 /* Refill RX ring(s). */
2574 if (work_mask & RXD_OPAQUE_RING_STD) {
2575 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2576 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2579 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2580 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2581 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/* tg3_poll() - NAPI poll callback.
 *
 * Under tp->lock: handles link-change events from the status block
 * (when not using the link-change register or serdes polling), then
 * runs TX completion under tp->tx_lock.  Outside the lock it runs
 * tg3_rx() bounded by both *budget and the device quota, decrementing
 * both by the work done.  When all work is finished it completes NAPI
 * and re-enables interrupts under the lock.  Returns 0 when done,
 * 1 to be polled again.
 */
2588 static int tg3_poll(struct net_device *netdev, int *budget)
2590 struct tg3 *tp = netdev_priv(netdev);
2591 struct tg3_hw_status *sblk = tp->hw_status;
2592 unsigned long flags;
2595 spin_lock_irqsave(&tp->lock, flags);
2597 /* handle link change and other phy events */
2598 if (!(tp->tg3_flags &
2599 (TG3_FLAG_USE_LINKCHG_REG |
2600 TG3_FLAG_POLL_SERDES))) {
2601 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-changed bit while keeping UPDATED set. */
2602 sblk->status = SD_STATUS_UPDATED |
2603 (sblk->status & ~SD_STATUS_LINK_CHG);
2604 tg3_setup_phy(tp, 0);
2608 /* run TX completion thread */
2609 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2610 spin_lock(&tp->tx_lock);
2612 spin_unlock(&tp->tx_lock);
2615 spin_unlock_irqrestore(&tp->lock, flags);
2617 /* run RX thread, within the bounds set by NAPI.
2618 * All RX "locking" is done by ensuring outside
2619 * code synchronizes with dev->poll()
2622 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2623 int orig_budget = *budget;
/* Respect the per-device quota as well as the global budget. */
2626 if (orig_budget > netdev->quota)
2627 orig_budget = netdev->quota;
2629 work_done = tg3_rx(tp, orig_budget);
2631 *budget -= work_done;
2632 netdev->quota -= work_done;
2634 if (work_done >= orig_budget)
2638 /* if no more work, tell net stack and NIC we're done */
2640 spin_lock_irqsave(&tp->lock, flags);
2641 __netif_rx_complete(netdev);
2642 tg3_enable_ints(tp);
2643 spin_unlock_irqrestore(&tp->lock, flags);
2646 return (done ? 0 : 1);
/* Return nonzero if the status block indicates any pending work
 * (PHY link change, TX completions, or new RX descriptors) that the
 * NAPI poll routine should be scheduled to handle.
 */
2649 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2651 struct tg3_hw_status *sblk = tp->hw_status;
2652 unsigned int work_exists = 0;
2654 /* check for phy events */
2655 if (!(tp->tg3_flags &
2656 (TG3_FLAG_USE_LINKCHG_REG |
2657 TG3_FLAG_POLL_SERDES))) {
2658 if (sblk->status & SD_STATUS_LINK_CHG)
2661 /* check for RX/TX work to do */
2662 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2663 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
/* Hardware interrupt handler.  Masks chip interrupts via the interrupt
 * mailbox, acknowledges the status block update, and schedules the NAPI
 * poll if there is work; otherwise re-enables interrupts (the IRQ line
 * may be shared).  Returns IRQ_HANDLED/IRQ_NONE via IRQ_RETVAL().
 */
2669 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2671 struct net_device *dev = dev_id;
2672 struct tg3 *tp = netdev_priv(dev);
2673 struct tg3_hw_status *sblk = tp->hw_status;
2674 unsigned long flags;
2675 unsigned int handled = 1;
2677 spin_lock_irqsave(&tp->lock, flags);
2679 if (sblk->status & SD_STATUS_UPDATED) {
2681 * writing any value to intr-mbox-0 clears PCI INTA# and
2682 * chip-internal interrupt pending events.
2683 * writing non-zero to intr-mbox-0 additional tells the
2684 * NIC to stop sending us irqs, engaging "in-intr-handler"
2687 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2690 * Flush PCI write. This also guarantees that our
2691 * status block has been flushed to host memory.
2693 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2694 sblk->status &= ~SD_STATUS_UPDATED;
2696 if (likely(tg3_has_work(dev, tp)))
2697 netif_rx_schedule(dev); /* schedule NAPI poll */
2699 /* no work, shared interrupt perhaps? re-enable
2700 * interrupts, and flush that PCI write
2702 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2704 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2706 } else { /* shared interrupt */
2710 spin_unlock_irqrestore(&tp->lock, flags);
2712 return IRQ_RETVAL(handled);
2715 static int tg3_init_hw(struct tg3 *);
2716 static int tg3_halt(struct tg3 *);
2718 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the normal interrupt handler directly so that
 * netconsole/kgdb-over-ethernet can drive the device with IRQs off.
 */
2719 static void tg3_poll_controller(struct net_device *dev)
2721 tg3_interrupt(dev->irq, dev, NULL);
/* Workqueue handler that performs a full chip reset outside interrupt
 * context (scheduled from tg3_tx_timeout()).  Takes both tp->lock and
 * tp->tx_lock for the reset, restarts the net interface afterwards, and
 * re-arms tp->timer if a restart was requested via TG3_FLG2_RESTART_TIMER.
 */
2725 static void tg3_reset_task(void *_data)
2727 struct tg3 *tp = _data;
2728 unsigned int restart_timer;
2732 spin_lock_irq(&tp->lock);
2733 spin_lock(&tp->tx_lock);
/* Latch-and-clear the restart request while both locks are held. */
2735 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2736 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2741 spin_unlock(&tp->tx_lock);
2742 spin_unlock_irq(&tp->lock);
2744 tg3_netif_start(tp);
2747 mod_timer(&tp->timer, jiffies + 1);
/* dev->tx_timeout hook: log the stall and defer the heavyweight chip
 * reset to process context via the reset_task workqueue item.
 */
2750 static void tg3_tx_timeout(struct net_device *dev)
2752 struct tg3 *tp = netdev_priv(dev);
2754 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2757 schedule_work(&tp->reset_task);
2760 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
/* Workaround for the hardware bug where a DMA buffer straddles a 4GB
 * boundary (see tg3_4g_overflow_test).  Copies the skb into a freshly
 * allocated linear skb, maps and queues that single buffer in place of
 * the offending descriptors, then unmaps and clears the old sw ring
 * entries from *start up to last_plus_one.
 * NOTE(review): error-path lines are elided in this excerpt.
 */
2762 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2763 u32 guilty_entry, int guilty_len,
2764 u32 last_plus_one, u32 *start, u32 mss)
2766 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2767 dma_addr_t new_addr;
2776 /* New SKB is guaranteed to be linear. */
2778 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2780 tg3_set_txd(tp, entry, new_addr, new_skb->len,
2781 (skb->ip_summed == CHECKSUM_HW) ?
2782 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2783 *start = NEXT_TX(entry);
2785 /* Now clean up the sw ring entries. */
2787 while (entry != last_plus_one) {
/* First iteration unmaps the linear head, later ones the page frags. */
2791 len = skb_headlen(skb);
2793 len = skb_shinfo(skb)->frags[i-1].size;
2794 pci_unmap_single(tp->pdev,
2795 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2796 len, PCI_DMA_TODEVICE);
2798 tp->tx_buffers[entry].skb = new_skb;
2799 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2801 tp->tx_buffers[entry].skb = NULL;
2803 entry = NEXT_TX(entry);
/* Fill in one TX descriptor at ring index 'entry'.  The low bit of
 * mss_and_is_end marks the last descriptor of a packet (sets
 * TXD_FLAG_END); the remaining bits carry the TSO MSS.  Descriptors
 * live either in host memory (TG3_FLAG_HOST_TXDS) or in NIC SRAM,
 * written via PIO — in the latter case the VLAN tag register write is
 * skipped when unchanged to save PIOs.
 */
2811 static void tg3_set_txd(struct tg3 *tp, int entry,
2812 dma_addr_t mapping, int len, u32 flags,
2815 int is_end = (mss_and_is_end & 0x1);
2816 u32 mss = (mss_and_is_end >> 1);
2820 flags |= TXD_FLAG_END;
2821 if (flags & TXD_FLAG_VLAN) {
/* VLAN tag travels in the upper 16 bits of 'flags'. */
2822 vlan_tag = flags >> 16;
2825 vlan_tag |= (mss << TXD_MSS_SHIFT);
2826 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2827 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2829 txd->addr_hi = ((u64) mapping >> 32);
2830 txd->addr_lo = ((u64) mapping & 0xffffffff);
2831 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2832 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2834 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2839 NIC_SRAM_TX_BUFFER_DESC);
2840 txd += (entry * TXD_SIZE);
2842 /* Save some PIOs */
2843 if (sizeof(dma_addr_t) != sizeof(u32))
2844 writel(((u64) mapping >> 32),
2845 txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2847 writel(((u64) mapping & 0xffffffff),
2848 txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2849 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2850 if (txr->prev_vlan_tag != vlan_tag) {
2851 writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2852 txr->prev_vlan_tag = vlan_tag;
/* Detect the chip's 4GB-boundary DMA bug: returns nonzero when a buffer
 * of 'len' bytes mapped at 'mapping' would wrap a 32-bit address
 * (base + len + 8 overflows in u32 arithmetic).  The 0xffffdcc0 guard
 * limits the check to mappings near the top of a 4GB window; the +8
 * slack presumably covers hardware prefetch — TODO confirm against
 * the erratum.
 */
2857 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2859 u32 base = (u32) mapping & 0xffffffff;
2861 return ((base > 0xffffdcc0) &&
2862 (base + len + 8 < base));
/* hard_start_xmit: map the skb (head + page fragments) into TX
 * descriptors, apply checksum-offload / TSO / VLAN flags, detect the
 * 4GB-boundary DMA bug and invoke the linearizing workaround if needed,
 * then ring the TX producer mailbox.  Runs under tp->tx_lock with IRQs
 * disabled (see the deadlock discussion preserved below).
 * NOTE(review): many intermediate lines are elided in this excerpt.
 */
2865 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2867 struct tg3 *tp = netdev_priv(dev);
2870 u32 len, entry, base_flags, mss;
2871 int would_hit_hwbug;
2872 unsigned long flags;
2874 len = skb_headlen(skb);
2876 /* No BH disabling for tx_lock here. We are running in BH disabled
2877 * context and TX reclaim runs via tp->poll inside of a software
2878 * interrupt. Rejoice!
2880 * Actually, things are not so simple. If we are to take a hw
2881 * IRQ here, we can deadlock, consider:
2890 * spin on tp->tx_lock
2892 * So we really do need to disable interrupts when taking
2895 spin_lock_irqsave(&tp->tx_lock, flags);
2897 /* This is a hard error, log it. */
2898 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2899 netif_stop_queue(dev);
2900 spin_unlock_irqrestore(&tp->tx_lock, flags);
2901 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2906 entry = tp->tx_prod;
2908 if (skb->ip_summed == CHECKSUM_HW)
2909 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2910 #if TG3_TSO_SUPPORT != 0
2912 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2913 (mss = skb_shinfo(skb)->tso_size) != 0) {
2914 int tcp_opt_len, ip_tcp_len;
2916 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2917 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2919 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2920 TXD_FLAG_CPU_POST_DMA);
/* Pre-seed the IP/TCP headers for hardware TSO: zero the IP
 * checksum and store the per-segment length.
 * NOTE(review): ntohs() here is semantically htons() (the two are
 * the same byte-swap on every architecture, so behavior is
 * unaffected, but the intent reads backwards). */
2922 skb->nh.iph->check = 0;
2923 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
2924 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* 5705 encodes header-length flags in the MSS field; other chips
 * carry them in base_flags. */
2928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2929 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2932 tsflags = ((skb->nh.iph->ihl - 5) +
2933 (tcp_opt_len >> 2));
2934 mss |= (tsflags << 11);
2937 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2940 tsflags = ((skb->nh.iph->ihl - 5) +
2941 (tcp_opt_len >> 2));
2942 base_flags |= tsflags << 12;
2949 #if TG3_VLAN_TAG_USED
2950 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2951 base_flags |= (TXD_FLAG_VLAN |
2952 (vlan_tx_tag_get(skb) << 16));
2955 /* Queue skb data, a.k.a. the main skb fragment. */
2956 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2958 tp->tx_buffers[entry].skb = skb;
2959 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2961 would_hit_hwbug = 0;
/* would_hit_hwbug stores entry+1 so that 0 can mean "no hit". */
2963 if (tg3_4g_overflow_test(mapping, len))
2964 would_hit_hwbug = entry + 1;
2966 tg3_set_txd(tp, entry, mapping, len, base_flags,
2967 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2969 entry = NEXT_TX(entry);
2971 /* Now loop through additional data fragments, and queue them. */
2972 if (skb_shinfo(skb)->nr_frags > 0) {
2973 unsigned int i, last;
2975 last = skb_shinfo(skb)->nr_frags - 1;
2976 for (i = 0; i <= last; i++) {
2977 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2980 mapping = pci_map_page(tp->pdev,
2983 len, PCI_DMA_TODEVICE);
2985 tp->tx_buffers[entry].skb = NULL;
2986 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2988 if (tg3_4g_overflow_test(mapping, len)) {
2989 /* Only one should match. */
2990 if (would_hit_hwbug)
2992 would_hit_hwbug = entry + 1;
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
2996 tg3_set_txd(tp, entry, mapping, len,
2997 base_flags, (i == last)|(mss << 1));
2999 tg3_set_txd(tp, entry, mapping, len,
3000 base_flags, (i == last));
3002 entry = NEXT_TX(entry);
/* Rewind to the first descriptor of this packet and hand the whole
 * thing to the linearizing workaround. */
3006 if (would_hit_hwbug) {
3007 u32 last_plus_one = entry;
3009 unsigned int len = 0;
3011 would_hit_hwbug -= 1;
3012 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3013 entry &= (TG3_TX_RING_SIZE - 1);
3016 while (entry != last_plus_one) {
3018 len = skb_headlen(skb);
3020 len = skb_shinfo(skb)->frags[i-1].size;
3022 if (entry == would_hit_hwbug)
3026 entry = NEXT_TX(entry);
3030 /* If the workaround fails due to memory/mapping
3031 * failure, silently drop this packet.
3033 if (tigon3_4gb_hwbug_workaround(tp, skb,
3042 /* Packets are ready, update Tx producer idx local and on card. */
3043 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3044 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
3045 TG3_64BIT_REG_LOW), entry);
3047 /* First, make sure tg3 sees last descriptor fully
3050 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3051 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
3053 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3054 TG3_64BIT_REG_LOW), entry);
3057 tp->tx_prod = entry;
/* Stop the queue while fewer than one max-fragment packet fits. */
3058 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3059 netif_stop_queue(dev);
3062 spin_unlock_irqrestore(&tp->tx_lock, flags);
3064 dev->trans_start = jiffies;
/* Record the new MTU and toggle TG3_FLAG_JUMBO_ENABLE when it exceeds
 * the standard Ethernet payload size.  Does not touch the hardware.
 */
3069 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3074 if (new_mtu > ETH_DATA_LEN)
3075 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3077 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
/* dev->change_mtu hook.  Rejects out-of-range values (TG3_MAX_MTU
 * depends on the ASIC revision's jumbo-frame support).  When the
 * interface is down only the bookkeeping changes; when it is up the
 * rings are reinitialized under tp->lock/tp->tx_lock and the interface
 * restarted.
 */
3080 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3082 struct tg3 *tp = netdev_priv(dev);
3084 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3087 if (!netif_running(dev)) {
3088 /* We'll just catch it later when the
3091 tg3_set_mtu(dev, tp, new_mtu);
3096 spin_lock_irq(&tp->lock);
3097 spin_lock(&tp->tx_lock);
3101 tg3_set_mtu(dev, tp, new_mtu);
3105 spin_unlock(&tp->tx_lock);
3106 spin_unlock_irq(&tp->lock);
3107 tg3_netif_start(tp);
3112 /* Free up pending packets in all rx/tx rings.
3114 * The chip has been shut down and the driver detached from
3115 * the networking, so no interrupts or new tx packets will
3116 * end up in the driver. tp->{tx,}lock is not held and we are not
3117 * in an interrupt context and thus may sleep.
3119 static void tg3_free_rings(struct tg3 *tp)
3121 struct ring_info *rxp;
/* Unmap and free every posted standard-ring RX skb. */
3124 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3125 rxp = &tp->rx_std_buffers[i];
3127 if (rxp->skb == NULL)
3129 pci_unmap_single(tp->pdev,
3130 pci_unmap_addr(rxp, mapping),
3131 RX_PKT_BUF_SZ - tp->rx_offset,
3132 PCI_DMA_FROMDEVICE);
3133 dev_kfree_skb_any(rxp->skb);
/* Same for the jumbo RX ring. */
3137 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3138 rxp = &tp->rx_jumbo_buffers[i];
3140 if (rxp->skb == NULL)
3142 pci_unmap_single(tp->pdev,
3143 pci_unmap_addr(rxp, mapping),
3144 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3145 PCI_DMA_FROMDEVICE);
3146 dev_kfree_skb_any(rxp->skb);
/* TX ring: each skb may span several entries (head + frags), so the
 * index advances by the number of fragments per packet. */
3150 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3151 struct tx_ring_info *txp;
3152 struct sk_buff *skb;
3155 txp = &tp->tx_buffers[i];
3163 pci_unmap_single(tp->pdev,
3164 pci_unmap_addr(txp, mapping),
3171 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3172 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3173 pci_unmap_page(tp->pdev,
3174 pci_unmap_addr(txp, mapping),
3175 skb_shinfo(skb)->frags[j].size,
3180 dev_kfree_skb_any(skb);
3184 /* Initialize tx/rx rings for packet processing.
3186 * The chip has been shut down and the driver detached from
3187 * the networking, so no interrupts or new tx packets will
3188 * end up in the driver. tp->{tx,}lock are held and thus
3191 static void tg3_init_rings(struct tg3 *tp)
3193 unsigned long start, end;
3196 /* Free up all the SKBs. */
3199 /* Zero out all descriptors. */
3200 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3201 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3202 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* TX descriptors live either in host memory or in NIC SRAM, which
 * must be cleared word-by-word through the register window. */
3204 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3205 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3209 NIC_SRAM_TX_BUFFER_DESC);
3210 end = start + TG3_TX_RING_BYTES;
3211 while (start < end) {
3215 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3216 tp->tx_buffers[i].prev_vlan_tag = 0;
3219 /* Initialize invariants of the rings, we only set this
3220 * stuff once. This works because the card does not
3221 * write into the rx buffer posting rings.
3223 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3224 struct tg3_rx_buffer_desc *rxd;
3226 rxd = &tp->rx_std[i];
3227 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3229 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3230 rxd->opaque = (RXD_OPAQUE_RING_STD |
3231 (i << RXD_OPAQUE_INDEX_SHIFT));
3234 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3235 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3236 struct tg3_rx_buffer_desc *rxd;
3238 rxd = &tp->rx_jumbo[i];
3239 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3241 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3243 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3244 (i << RXD_OPAQUE_INDEX_SHIFT));
3248 /* Now allocate fresh SKBs for each rx ring. */
3249 for (i = 0; i < tp->rx_pending; i++) {
3250 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3255 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3256 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3257 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
/* Release all DMA-consistent ring/status/stats memory and the kmalloc'd
 * software ring-info arrays, NULLing each pointer after freeing so the
 * function is safe to call on a partially allocated state (it is the
 * error-path cleanup for tg3_alloc_consistent()).
3265 * Must not be invoked with interrupt sources disabled and
3266 * the hardware shutdown down.
3268 static void tg3_free_consistent(struct tg3 *tp)
3270 if (tp->rx_std_buffers) {
3271 kfree(tp->rx_std_buffers);
3272 tp->rx_std_buffers = NULL;
3275 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3276 tp->rx_std, tp->rx_std_mapping);
3280 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3281 tp->rx_jumbo, tp->rx_jumbo_mapping);
3282 tp->rx_jumbo = NULL;
3285 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3286 tp->rx_rcb, tp->rx_rcb_mapping);
3290 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3291 tp->tx_ring, tp->tx_desc_mapping);
3294 if (tp->hw_status) {
3295 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3296 tp->hw_status, tp->status_mapping);
3297 tp->hw_status = NULL;
3300 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3301 tp->hw_stats, tp->stats_mapping);
3302 tp->hw_stats = NULL;
/* Allocate all ring/status/stats memory: one kmalloc covering the
 * software ring-info arrays (std RX + jumbo RX + TX, carved up by
 * pointer arithmetic below) plus pci_alloc_consistent blocks for each
 * descriptor ring, the status block and the statistics block.  On any
 * failure, tg3_free_consistent() is invoked to unwind.
3307 * Must not be invoked with interrupt sources disabled and
3308 * the hardware shutdown down. Can sleep.
3310 static int tg3_alloc_consistent(struct tg3 *tp)
3312 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3314 TG3_RX_JUMBO_RING_SIZE)) +
3315 (sizeof(struct tx_ring_info) *
3318 if (!tp->rx_std_buffers)
3321 memset(tp->rx_std_buffers, 0,
3322 (sizeof(struct ring_info) *
3324 TG3_RX_JUMBO_RING_SIZE)) +
3325 (sizeof(struct tx_ring_info) *
/* Carve the single allocation into the three sw ring arrays. */
3328 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3329 tp->tx_buffers = (struct tx_ring_info *)
3330 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3332 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3333 &tp->rx_std_mapping);
3337 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3338 &tp->rx_jumbo_mapping);
3343 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3344 &tp->rx_rcb_mapping);
/* Host TX descriptors only when the chip uses host memory for them;
 * otherwise descriptors live in NIC SRAM and no DMA block is needed. */
3348 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3349 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3350 &tp->tx_desc_mapping);
3355 tp->tx_desc_mapping = 0;
3358 tp->hw_status = pci_alloc_consistent(tp->pdev,
3360 &tp->status_mapping);
3364 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3365 sizeof(struct tg3_hw_stats),
3366 &tp->stats_mapping);
3370 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3371 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3376 tg3_free_consistent(tp);
3380 #define MAX_WAIT_CNT 1000
3382 /* To stop a block, clear the enable bit and poll till it
3383 * clears. tp->lock is held.
3385 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
/* On 5705/5750 several of these enable bits cannot be toggled, so the
 * function reports success without polling (see comment below). */
3390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3398 /* We can't enable/disable these bits of the
3399 * 5705/5750, just say success.
3412 for (i = 0; i < MAX_WAIT_CNT; i++) {
3415 if ((val & enable_bit) == 0)
3419 if (i == MAX_WAIT_CNT) {
3420 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3421 "ofs=%lx enable_bit=%x\n",
3429 /* tp->lock is held. */
/* Quiesce the whole chip: disable interrupts, stop every RX/TX engine
 * block in dependency order via tg3_stop_block(), drain MAC TX mode,
 * reset the FTQ, and clear the host status/stats blocks.  The accumulated
 * err presumably propagates any stop-block timeout to the caller —
 * return line elided in this excerpt.
 */
3430 static int tg3_abort_hw(struct tg3 *tp)
3434 tg3_disable_ints(tp);
3436 tp->rx_mode &= ~RX_MODE_ENABLE;
3437 tw32_f(MAC_RX_MODE, tp->rx_mode);
3440 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3441 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3442 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3443 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3444 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3445 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3447 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3448 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3449 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3450 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3451 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3452 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3453 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3457 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3458 tw32_f(MAC_MODE, tp->mac_mode);
3461 tp->tx_mode &= ~TX_MODE_ENABLE;
3462 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll for the MAC TX engine to actually stop. */
3464 for (i = 0; i < MAX_WAIT_CNT; i++) {
3466 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3469 if (i >= MAX_WAIT_CNT) {
3470 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3471 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3472 tp->dev->name, tr32(MAC_TX_MODE));
3476 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3477 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3478 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3480 tw32(FTQ_RESET, 0xffffffff);
3481 tw32(FTQ_RESET, 0x00000000);
3483 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3484 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3489 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3491 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3497 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (request slot 1 and
 * poll up to 8000 iterations for the grant).  A no-op when the chip has
 * no NVRAM (TG3_FLAG_NVRAM clear).  Timeout path elided in this excerpt.
 */
3498 static int tg3_nvram_lock(struct tg3 *tp)
3500 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3503 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3504 for (i = 0; i < 8000; i++) {
3505 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3515 /* tp->lock is held. */
/* Release the NVRAM software arbitration semaphore taken by
 * tg3_nvram_lock() (posted write is flushed by tw32_f).
 */
3516 static void tg3_nvram_unlock(struct tg3 *tp)
3518 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3519 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3522 /* tp->lock is held. */
/* Tell on-chip ASF/management firmware that a reset of type 'kind'
 * (RESET_KIND_INIT/SHUTDOWN/SUSPEND) is about to happen: write the
 * firmware mailbox magic, then the matching driver-state word when the
 * new ASF handshake is in use.
 */
3523 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3525 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3526 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3528 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3530 case RESET_KIND_INIT:
3531 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3535 case RESET_KIND_SHUTDOWN:
3536 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3540 case RESET_KIND_SUSPEND:
3541 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3551 /* tp->lock is held. */
/* Counterpart of tg3_write_sig_pre_reset(): after the reset completes,
 * report the corresponding *_DONE driver state to ASF firmware when the
 * new handshake is in use.
 */
3552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3554 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3556 case RESET_KIND_INIT:
3557 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3558 DRV_STATE_START_DONE);
3561 case RESET_KIND_SHUTDOWN:
3562 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3563 DRV_STATE_UNLOAD_DONE);
3572 /* tp->lock is held. */
/* Legacy-handshake variant of the reset signature write: used for
 * chips with ASF enabled but without the new handshake protocol.
 */
3573 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3575 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3577 case RESET_KIND_INIT:
3578 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3582 case RESET_KIND_SHUTDOWN:
3583 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3587 case RESET_KIND_SUSPEND:
3588 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3598 /* tp->lock is held. */
/* Perform a full core-clock chip reset and bring the device back to a
 * usable register state: issue GRC_MISC_CFG_CORECLK_RESET (with the
 * 5701 read-workaround temporarily disabled), flush via PCI config
 * space since MMIO is dead during reset, restore config space and
 * indirect access, wait for bootcode firmware to signal completion,
 * and re-probe the ASF enable state from NVRAM-shadow SRAM.
 * NOTE(review): many intermediate lines are elided in this excerpt.
 */
3599 static int tg3_chip_reset(struct tg3 *tp)
3605 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704))
3609 * We must avoid the readl() that normally takes place.
3610 * It locks machines, causes machine checks, and other
3611 * fun things. So, temporarily disable the 5701
3612 * hardware workaround, while we do the reset.
3614 flags_save = tp->tg3_flags;
3615 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3618 val = GRC_MISC_CFG_CORECLK_RESET;
3620 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3621 if (tr32(0x7e2c) == 0x60) {
3624 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3625 tw32(GRC_MISC_CFG, (1 << 29));
3630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3632 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3633 tw32(GRC_MISC_CFG, val)
3635 /* restore 5701 hardware bug workaround flag */
3636 tp->tg3_flags = flags_save;
3638 /* Flush PCI posted writes. The normal MMIO registers
3639 * are inaccessible at this time so this is the only
3640 * way to make this reliably. I tried to use indirect
3641 * register read/write but this upset some 5701 variants.
3643 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3647 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3648 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3652 /* Wait for link training to complete. */
3653 for (i = 0; i < 5000; i++)
3656 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3657 pci_write_config_dword(tp->pdev, 0xc4,
3658 cfg_val | (1 << 15));
3660 /* Set PCIE max payload size and clear error status. */
3661 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3664 /* Re-enable indirect register accesses. */
3665 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3666 tp->misc_host_ctrl);
3668 /* Set MAX PCI retry to zero. */
3669 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3670 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3671 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3672 val |= PCISTATE_RETRY_SAME_DMA;
3673 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3675 pci_restore_state(tp->pdev, tp->pci_cfg_state);
3677 /* Make sure PCI-X relaxed ordering bit is clear. */
3678 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3679 val &= ~PCIX_CAPS_RELAXED_ORDERING;
3680 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3682 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3684 tw32(GRC_MODE, tp->grc_mode);
3686 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3687 u32 val = tr32(0xc4);
3689 tw32(0xc4, val | (1 << 15));
3692 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3693 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3694 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3695 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3696 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3697 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3700 if (tp->phy_id == PHY_ID_SERDES) {
3701 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3702 tw32_f(MAC_MODE, tp->mac_mode);
3704 tw32_f(MAC_MODE, 0);
3707 /* Wait for firmware initialization to complete. */
3708 for (i = 0; i < 100000; i++) {
3709 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3710 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Sun 5704 boards run without the bootcode firmware, so a timeout
 * there is not reported as an error. */
3715 !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3716 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3717 "firmware will not restart magic=%08x\n",
3718 tp->dev->name, val);
3722 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3723 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3724 u32 val = tr32(0x7c00);
3726 tw32(0x7c00, val | (1 << 25));
3729 /* Reprobe ASF enable state. */
3730 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3731 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3732 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3733 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3736 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3737 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3738 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3740 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3747 /* tp->lock is held. */
/* Ask the ASF firmware running on the RX CPU to pause: post the
 * PAUSE_FW command in the firmware command mailbox, raise the RX CPU
 * event, and poll briefly for the CPU to acknowledge it.
 */
3748 static void tg3_stop_fw(struct tg3 *tp)
3750 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3754 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3755 val = tr32(GRC_RX_CPU_EVENT);
3757 tw32(GRC_RX_CPU_EVENT, val);
3759 /* Wait for RX cpu to ACK the event. */
3760 for (i = 0; i < 100; i++) {
3761 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3768 /* tp->lock is held. */
/* Orderly shutdown: signal firmware (pre-reset), reset the chip, then
 * write the legacy and post-reset shutdown signatures.  Returns the
 * tg3_chip_reset() result (return line elided in this excerpt).
 */
3769 static int tg3_halt(struct tg3 *tp)
3775 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3778 err = tg3_chip_reset(tp);
3780 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3781 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Layout of the on-chip 5701 firmware image in the NIC MIPS CPU's
 * address space (text/rodata/data/sbss/bss segments).
 * NOTE(review): TG3_FW_RELASE_MINOR is misspelled ("RELASE"); left
 * unchanged since renaming the macro could break references elsewhere.
 */
3789 #define TG3_FW_RELEASE_MAJOR 0x0
3790 #define TG3_FW_RELASE_MINOR 0x0
3791 #define TG3_FW_RELEASE_FIX 0x0
3792 #define TG3_FW_START_ADDR 0x08000000
3793 #define TG3_FW_TEXT_ADDR 0x08000000
3794 #define TG3_FW_TEXT_LEN 0x9c0
3795 #define TG3_FW_RODATA_ADDR 0x080009c0
3796 #define TG3_FW_RODATA_LEN 0x60
3797 #define TG3_FW_DATA_ADDR 0x08000a40
3798 #define TG3_FW_DATA_LEN 0x20
3799 #define TG3_FW_SBSS_ADDR 0x08000a60
3800 #define TG3_FW_SBSS_LEN 0xc
3801 #define TG3_FW_BSS_ADDR 0x08000a70
3802 #define TG3_FW_BSS_LEN 0x10
3804 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3805 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3806 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3807 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3808 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3809 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3810 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3811 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3812 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3813 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3814 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3815 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3816 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3817 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3818 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3819 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3820 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3821 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3822 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3823 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3824 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3825 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3826 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3827 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3828 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3829 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3831 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3832 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3833 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3834 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3835 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3836 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3837 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3838 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3839 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3840 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3841 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3842 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3843 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3844 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3845 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3846 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3847 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3848 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3849 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3850 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3851 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3852 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3853 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3854 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3855 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3856 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3857 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3858 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3859 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3860 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3861 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3862 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3863 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3864 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3865 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3866 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3867 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3868 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3869 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3870 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3871 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3872 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3873 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3874 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3875 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3876 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3877 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3878 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3879 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3880 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3881 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3882 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3883 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3884 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3885 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3886 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3887 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3888 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3889 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3890 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3891 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3892 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3893 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
3894 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
3895 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
3898 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
3899 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
3900 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
3901 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
3902 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
3906 #if 0 /* All zeros, don't eat up space with it. */
3907 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
3908 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3909 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratch memory windows used to stage firmware images for the
 * two embedded CPUs.  Each CPU gets its own 16 KB (0x4000) region; the
 * loader below zero-fills the whole window before copying an image in.
 */
3913 #define RX_CPU_SCRATCH_BASE 0x30000
3914 #define RX_CPU_SCRATCH_SIZE 0x04000
3915 #define TX_CPU_SCRATCH_BASE 0x34000
3916 #define TX_CPU_SCRATCH_SIZE 0x04000
3918 /* tp->lock is held. */
/*
 * tg3_halt_cpu - force one of the two on-chip CPUs into the halted state.
 *
 * @offset selects the CPU register block (RX_CPU_BASE or TX_CPU_BASE);
 * the per-CPU CPU_STATE / CPU_MODE registers are addressed relative to it.
 *
 * NOTE(review): this extract elides several original lines (opening
 * brace, local declarations, early return for the 5705 TX case, the
 * success/failure returns), so the text below is not the complete body.
 */
3919 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* The 5705 ASIC has no separate TX CPU, so a TX_CPU_BASE halt request
 * is not applicable there (the elided line presumably bails out early
 * — TODO confirm against the full source). */
3923 if (offset == TX_CPU_BASE &&
3924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
/* RX CPU: re-assert HALT up to 10000 times, until CPU_MODE reads back
 * with the HALT bit actually set. */
3927 if (offset == RX_CPU_BASE) {
3928 for (i = 0; i < 10000; i++) {
3929 tw32(offset + CPU_STATE, 0xffffffff);
3930 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3931 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* One final HALT request using tw32_f (write with flush). */
3935 tw32(offset + CPU_STATE, 0xffffffff);
3936 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* Non-RX (TX) CPU: same halt-and-poll loop. */
3939 for (i = 0; i < 10000; i++) {
3940 tw32(offset + CPU_STATE, 0xffffffff);
3941 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3942 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* HALT bit never stuck: log which CPU timed out. */
3948 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
3951 (offset == RX_CPU_BASE ? "RX" : "TX"));
/*
 * Firmware image descriptor: the on-chip load address and byte length of
 * each segment (.text, .rodata, .data), consumed by tg3_load_firmware_cpu().
 * NOTE(review): the "struct fw_info {" header line and the u32 *..._data
 * pointer members (which tg3_load_firmware_cpu() dereferences) are elided
 * from this extract.
 */
/* .text segment load address and length */
3958 unsigned int text_base;
3959 unsigned int text_len;
/* .rodata segment load address and length */
3961 unsigned int rodata_base;
3962 unsigned int rodata_len;
/* .data segment load address and length */
3964 unsigned int data_base;
3965 unsigned int data_len;
3969 /* tp->lock is held. */
/*
 * tg3_load_firmware_cpu - halt a chip CPU and copy a firmware image
 * into its scratch memory window.
 *
 * @cpu_base:         CPU register block (RX_CPU_BASE or TX_CPU_BASE)
 * @cpu_scratch_base: start of that CPU's scratch memory window
 * @cpu_scratch_size: size of the window in bytes (zero-filled first)
 * @info:             segment addresses/lengths and data pointers
 *
 * Returns 0 on success, negative errno otherwise (the error-path lines
 * are elided from this extract).  Does NOT start the CPU; callers do
 * that themselves (see tg3_load_5701_a0_firmware_fix()).
 */
3970 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
3971 int cpu_scratch_size, struct fw_info *info)
/* Saved so the flag forced below can be restored before returning. */
3974 u32 orig_tg3_flags = tp->tg3_flags;
3975 void (*write_op)(struct tg3 *, u32, u32);
/* Loading TX firmware is not valid on 5705 (it has no TX CPU). */
3977 if (cpu_base == TX_CPU_BASE &&
3978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3979 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
3980 "TX cpu firmware on %s which is 5705.\n",
/* Pick the memory-write primitive appropriate for the ASIC. */
3985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3986 write_op = tg3_write_mem;
3988 write_op = tg3_write_indirect_reg32;
3990 /* Force use of PCI config space for indirect register
3993 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
/* The target CPU must be halted before its scratch RAM is rewritten. */
3995 err = tg3_halt_cpu(tp, cpu_base);
/* Zero the whole scratch window, then keep the CPU in HALT while the
 * three segments are copied in word by word. */
3999 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4000 write_op(tp, cpu_scratch_base + i, 0);
4001 tw32(cpu_base + CPU_STATE, 0xffffffff);
4002 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* Only the low 16 bits of each segment's load address index into the
 * scratch window; a NULL data pointer means "segment is all zeros". */
4003 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4004 write_op(tp, (cpu_scratch_base +
4005 (info->text_base & 0xffff) +
4008 info->text_data[i] : 0));
4009 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4010 write_op(tp, (cpu_scratch_base +
4011 (info->rodata_base & 0xffff) +
4013 (info->rodata_data ?
4014 info->rodata_data[i] : 0));
4015 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4016 write_op(tp, (cpu_scratch_base +
4017 (info->data_base & 0xffff) +
4020 info->data_data[i] : 0));
/* Undo the PCIX_TARGET_HWBUG forcing done above. */
4025 tp->tg3_flags = orig_tg3_flags;
4029 /* tp->lock is held. */
/*
 * tg3_load_5701_a0_firmware_fix - load the 5701 A0 fixup firmware into
 * both CPU scratch windows, then start only the RX CPU at its entry
 * point.  Returns 0 on success, negative errno on failure (the error
 * returns are elided from this extract).
 */
4030 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4032 struct fw_info info;
/* Describe the three firmware segments.  data_data is NULL because the
 * .data segment is all zeros (see the #if 0'd tg3FwData above); the
 * loader writes zeros for a NULL segment pointer. */
4035 info.text_base = TG3_FW_TEXT_ADDR;
4036 info.text_len = TG3_FW_TEXT_LEN;
4037 info.text_data = &tg3FwText[0];
4038 info.rodata_base = TG3_FW_RODATA_ADDR;
4039 info.rodata_len = TG3_FW_RODATA_LEN;
4040 info.rodata_data = &tg3FwRodata[0];
4041 info.data_base = TG3_FW_DATA_ADDR;
4042 info.data_len = TG3_FW_DATA_LEN;
4043 info.data_data = NULL;
/* The same image is copied into both the RX and TX CPU windows. */
4045 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4046 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4051 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4052 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4057 /* Now startup only the RX cpu. */
4058 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4059 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* Give the CPU up to five attempts to latch the new program counter,
 * re-halting and rewriting CPU_PC on each retry. */
4061 for (i = 0; i < 5; i++) {
4062 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4064 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4065 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4066 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* CPU_PC never took the firmware entry address: report and fail. */
4070 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4071 "to set RX CPU PC, is %08x should be %08x\n",
4072 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Clear HALT so the RX CPU starts executing the new image. */
4076 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4077 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4082 #if TG3_TSO_SUPPORT != 0
/* Layout of the TSO firmware image (version 1.6.0 per the three release
 * macros): load addresses and byte lengths of each segment.
 * NOTE(review): "RELASE" is a historical typo; the name is referenced
 * elsewhere in this file, so it is left unchanged here. */
4084 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4085 #define TG3_TSO_FW_RELASE_MINOR 0x6
4086 #define TG3_TSO_FW_RELEASE_FIX 0x0
4087 #define TG3_TSO_FW_START_ADDR 0x08000000
4088 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4089 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4090 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4091 #define TG3_TSO_FW_RODATA_LEN 0x60
4092 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4093 #define TG3_TSO_FW_DATA_LEN 0x30
4094 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4095 #define TG3_TSO_FW_SBSS_LEN 0x2c
4096 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4097 #define TG3_TSO_FW_BSS_LEN 0x894
4099 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4100 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4101 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4102 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4103 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4104 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4105 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4106 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4107 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4108 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4109 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4110 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4111 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4112 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4113 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4114 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4115 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4116 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4117 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4118 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4119 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4120 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4121 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4122 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4123 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4124 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4125 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4126 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4127 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4128 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4129 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4130 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4131 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4132 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4133 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4134 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4135 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4136 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4137 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4138 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4139 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4140 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4141 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4142 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4143 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4144 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4145 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4146 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4147 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4148 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4149 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4150 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4151 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4152 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4153 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4154 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4155 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4156 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4157 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4158 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4159 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4160 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4161 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4162 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4163 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4164 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4165 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4166 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4167 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4168 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4169 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4170 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4171 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4172 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4173 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4174 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4175 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4176 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4177 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4178 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4179 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4180 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4181 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4182 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4183 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4184 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4185 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4186 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4187 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4188 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4189 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4190 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4191 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4192 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4193 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4194 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4195 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4196 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4197 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4198 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4199 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4200 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4201 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4202 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4203 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4204 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4205 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4206 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4207 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4208 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4209 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4210 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4211 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4212 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4213 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4214 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4215 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4216 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4217 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4218 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4219 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4220 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4221 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4222 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4223 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4224 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4225 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4226 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4227 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4228 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4229 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4230 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4231 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4232 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4233 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4234 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4235 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4236 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4237 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4238 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4239 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4240 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4241 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4242 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4243 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4244 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4245 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4246 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4247 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4248 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4249 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4250 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4251 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4252 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4253 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4254 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4255 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4256 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4257 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4258 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4259 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4260 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4261 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4262 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4263 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4264 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4265 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4266 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4267 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4268 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4269 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4270 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4271 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4272 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4273 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4274 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4275 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4276 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4277 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4278 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4279 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4280 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4281 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4282 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4283 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4284 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4285 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4286 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4287 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4288 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4289 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4290 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4291 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4292 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4293 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4294 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4295 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4296 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4297 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4298 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4299 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4300 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4301 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4302 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4303 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4304 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4305 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4306 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4307 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4308 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4309 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4310 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4311 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4312 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4313 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4314 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4315 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4316 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4317 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4318 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4319 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4320 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4321 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4322 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4323 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4324 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4325 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4326 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4327 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4328 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4329 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4330 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4331 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4332 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4333 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4334 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4335 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4336 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4337 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4338 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4339 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4340 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4341 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4342 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4343 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4344 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4345 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4346 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4347 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4348 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4349 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4350 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4351 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4352 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4353 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4354 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4355 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4356 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4357 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4358 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4359 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4360 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4361 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4362 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4363 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4364 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4365 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4366 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4367 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4368 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4369 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4370 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4371 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4372 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4373 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4374 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4375 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4376 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4377 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4378 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4379 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4380 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4381 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4382 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4383 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4386 u32 tg3TsoFwRodata[] = {
4387 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4388 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4389 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4390 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4394 u32 tg3TsoFwData[] = {
4395 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4396 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4400 /* 5705 needs a special version of the TSO firmware. */
/* Layout of the 5705-specific TSO firmware image (version 1.2.0 per the
 * release macros): load addresses and byte lengths of each segment.
 * NOTE(review): "RELASE" is a historical typo, kept for consistency with
 * the matching TG3_TSO_FW_RELASE_MINOR macro above. */
4401 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
4402 #define TG3_TSO5_FW_RELASE_MINOR 0x2
4403 #define TG3_TSO5_FW_RELEASE_FIX 0x0
4404 #define TG3_TSO5_FW_START_ADDR 0x00010000
4405 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
4406 #define TG3_TSO5_FW_TEXT_LEN 0xe90
4407 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
4408 #define TG3_TSO5_FW_RODATA_LEN 0x50
4409 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
4410 #define TG3_TSO5_FW_DATA_LEN 0x20
4411 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
4412 #define TG3_TSO5_FW_SBSS_LEN 0x28
4413 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
4414 #define TG3_TSO5_FW_BSS_LEN 0x88
4416 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4417 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4418 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4419 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4420 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4421 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4422 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4423 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4424 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4425 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4426 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4427 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4428 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4429 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4430 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4431 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4432 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4433 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4434 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4435 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4436 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4437 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4438 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4439 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4440 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4441 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4442 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4443 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4444 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4445 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4446 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4447 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4448 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4449 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4450 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4451 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4452 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4453 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4454 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4455 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4456 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4457 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4458 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4459 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4460 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4461 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4462 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4463 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4464 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4465 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4466 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4467 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4468 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4469 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4470 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4471 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4472 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4473 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4474 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4475 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4476 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4477 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4478 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4479 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4480 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4481 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4482 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4483 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4484 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4485 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4486 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4487 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4488 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4489 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4490 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4491 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4492 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4493 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4494 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4495 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4496 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4497 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4498 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4499 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4500 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4501 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4502 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4503 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4504 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4505 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4506 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4507 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4508 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4509 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4510 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4511 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4512 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4513 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4514 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4515 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4516 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4517 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4518 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4519 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4520 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4521 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4522 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4523 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4524 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4525 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4526 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4527 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4528 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4529 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4530 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4531 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4532 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4533 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4534 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4535 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4536 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4537 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4538 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4539 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4540 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4541 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4542 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4543 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4544 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4545 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4546 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4547 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4548 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4549 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4550 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4551 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4552 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4553 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4554 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4555 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4556 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4557 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4558 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4559 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4560 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4561 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4562 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4563 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4564 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4565 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4566 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4567 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4568 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4569 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4570 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4571 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4572 0x00000000, 0x00000000, 0x00000000,
/* Read-only data segment of the 5705-class TSO firmware image.  The
 * words are little ASCII tags used by the firmware itself ("Main",
 * "CpuB"/"CpuA", "stkoffld", "fatalErr").  Copied into NIC SRAM at
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  Do not edit.
 */
u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
/* Initialized data segment of the 5705-class TSO firmware image;
 * contains the ASCII version tag "stkoffld_v1.2.0".  Copied into NIC
 * SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  Do not edit.
 */
u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
/* tp->lock is held. */
/* Download the TSO offload firmware into one of the NIC's on-chip MIPS
 * CPUs and start it executing.  5705-class chips use the 5-series image
 * (tg3Tso5Fw*) loaded into the RX CPU, with scratch space carved out of
 * the SRAM mbuf pool; other chips use the regular image (tg3TsoFw*) on
 * the TX CPU.  After the download, the CPU's program counter is pointed
 * at the firmware entry and polled to confirm the core took it.
 * NOTE(review): the 5750 check below appears to special-case that chip
 * before any load happens -- confirm the intended action against the
 * full source.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: 5-series image, run on the RX CPU; scratch space
		 * lives at the base of the SRAM mbuf pool.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
		/* Non-5705: regular image, run on the TX CPU with its
		 * dedicated scratch area.
		 */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	/* Poll a few times for the PC to land on the firmware entry;
	 * on each miss, halt the core and re-set the PC.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
	printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
	       "to set CPU PC, is %08x should be %08x\n",
	       tp->dev->name, tr32(cpu_base + CPU_PC),
	/* PC confirmed -- clear the halt bit and let the firmware run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
4659 #endif /* TG3_TSO_SUPPORT != 0 */
4661 /* tp->lock is held. */
4662 static void __tg3_set_mac_addr(struct tg3 *tp)
4664 u32 addr_high, addr_low;
4667 addr_high = ((tp->dev->dev_addr[0] << 8) |
4668 tp->dev->dev_addr[1]);
4669 addr_low = ((tp->dev->dev_addr[2] << 24) |
4670 (tp->dev->dev_addr[3] << 16) |
4671 (tp->dev->dev_addr[4] << 8) |
4672 (tp->dev->dev_addr[5] << 0));
4673 for (i = 0; i < 4; i++) {
4674 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4675 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4678 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4679 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4680 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4681 for (i = 0; i < 12; i++) {
4682 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4683 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4687 addr_high = (tp->dev->dev_addr[0] +
4688 tp->dev->dev_addr[1] +
4689 tp->dev->dev_addr[2] +
4690 tp->dev->dev_addr[3] +
4691 tp->dev->dev_addr[4] +
4692 tp->dev->dev_addr[5]) &
4693 TX_BACKOFF_SEED_MASK;
4694 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4697 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4699 struct tg3 *tp = netdev_priv(dev);
4700 struct sockaddr *addr = p;
4702 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4704 spin_lock_irq(&tp->lock);
4705 __tg3_set_mac_addr(tp);
4706 spin_unlock_irq(&tp->lock);
4711 /* tp->lock is held. */
4712 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4713 dma_addr_t mapping, u32 maxlen_flags,
4717 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4718 ((u64) mapping >> 32));
4720 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4721 ((u64) mapping & 0xffffffff));
4723 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4726 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4728 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4732 static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
/* Full hardware (re)initialization.  Resets the chip, then programs
 * every functional block in the required order: clocks and PCI-X/MSI
 * workarounds, GRC mode, buffer manager and watermarks, rx/tx BDINFO
 * rings and mailboxes, MAC address, DMA engines (RDMAC/WDMAC), host
 * coalescing, statistics, per-block mode enables, optional firmware
 * loads, PHY/SerDes bring-up, and finally the receive rules.
 * err values from the sub-steps (tg3_abort_hw, tg3_chip_reset, the
 * firmware loads, tg3_setup_phy) are captured along the way.
 */
static int tg3_reset_hw(struct tg3 *tp)
	u32 val, rdmac_mode;

	tg3_disable_ints(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
		err = tg3_abort_hw(tp);

	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
	tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */

	/* This value is determined during the probe time DMA
	 * engine test, tg3_test_dma.
	 */
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
		tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
		tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
	if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
		tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
	if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
		tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register. Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
#if TG3_TSO_SUPPORT != 0
	else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		/* Shrink the 5705 mbuf pool to leave room for the TSO
		 * firmware image (rounded up to a 0x80 boundary).
		 */
		fw_len = (TG3_TSO5_FW_TEXT_LEN +
			  TG3_TSO5_FW_RODATA_LEN +
			  TG3_TSO5_FW_DATA_LEN +
			  TG3_TSO5_FW_SBSS_LEN +
			  TG3_TSO5_FW_BSS_LEN);
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);

	/* Program mbuf/DMA watermarks; jumbo frames need the larger set. */
	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	/* Enable the buffer manager and poll until it reports enabled. */
	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",

	/* Setup replenish threshold. */
	tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);

	/* Initialize TG3_BDINFO's at:
	 * RCVDBDI_STD_BD: standard eth size rx ring
	 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
	 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
	 *
	 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
	 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
	 * ring attribute flags
	 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tp->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tp->rx_std_mapping & 0xffffffff));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
	     NIC_SRAM_RX_BUFFER_DESC);

	/* Don't even try to program the JUMBO/MINI buffer descriptor
	 * rings on 5705/5750.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);

		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Setup replenish threshold. */
	tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);

	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->rx_jumbo_mapping >> 32));
		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->rx_jumbo_mapping & 0xffffffff));
		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* There is only one send ring on 5705/5750, no need to explicitly
	 * disable the others.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
		/* Clear out send RCB ring in SRAM. */
		for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
				      BDINFO_FLAGS_DISABLED);

	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
	tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);

	if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
		tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
			       tp->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
			       BDINFO_FLAGS_DISABLED,
			       NIC_SRAM_TX_BUFFER_DESC);

	/* There is only one receive return ring on 5705/5750, no need
	 * to explicitly disable the others.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
		for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
		     i += TG3_BDINFO_SIZE) {
			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
				      BDINFO_FLAGS_DISABLED);

	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);

	tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
		       (TG3_RX_RCB_RING_SIZE(tp) <<
			BDINFO_FLAGS_MAXLEN_SHIFT),

	/* Tell the chip how many rx buffers are available. */
	tp->rx_std_ptr = tp->rx_pending;
	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,

	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
		tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	tw32(MAC_TX_LENGTHS,
	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	     (6 << TX_LENGTHS_IPG_SHIFT) |
	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
	if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
		rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

#if TG3_TSO_SUPPORT != 0
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
		rdmac_mode |= (1 << 27);

	/* Receive/send statistics. */
	if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
	    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))

	tw32(HOSTCC_RXCOL_TICKS, 0);
	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
	tw32(HOSTCC_RXMAX_FRAMES, 1);
	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

	/* set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tp->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tp->status_mapping & 0xffffffff));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
		/* Status/statistics block address. See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STAT_COAL_TICKS,
		     DEFAULT_STAT_COAL_TICKS);
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	/* Clear statistics/status block in chip, and status block in ram. */
	for (i = NIC_SRAM_STATS_BLK;
	     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		tg3_write_mem(tp, i, 0);
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);

	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);

	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
	tr32(MAILBOX_INTERRUPT_0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		/* NOTE(review): this tests tg3_flags against the
		 * TG3_FLG2_TSO_CAPABLE bit; the parallel RDMAC code above
		 * uses tg3_flags2 for the same bit -- likely a typo here,
		 * confirm which flags word is intended.
		 */
		if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;

	tw32_f(WDMAC_MODE, val);

	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
		val = tr32(TG3PCI_X_CAPS);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			val &= ~PCIX_CAPS_BURST_MASK;
			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
			if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
				val |= (tp->split_mode_max_reqs <<
					PCIX_CAPS_SPLIT_SHIFT);
		tw32(TG3PCI_X_CAPS, val);

	tw32_f(RDMAC_MODE, rdmac_mode);

	/* Enable the remaining per-block modes. */
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
	tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
#if TG3_TSO_SUPPORT != 0
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);

#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);

	tp->tx_mode = TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	tp->rx_mode = RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);

	/* Restore link parameters saved when the PHY was powered down. */
	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		tp->link_config.speed = tp->link_config.orig_speed;
		tp->link_config.duplex = tp->link_config.orig_duplex;
		tp->link_config.autoneg = tp->link_config.orig_autoneg;

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_id == PHY_ID_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
	tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (tp->phy_id == PHY_ID_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			/* Set drive transmission level to 1.2V */
			val = tr32(MAC_SERDES_CFG);
			tw32(MAC_SERDES_CFG, val);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    tp->phy_id == PHY_ID_SERDES) {
		/* Enable hardware link auto-negotiation */
		u32 digctrl, txctrl;

		digctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_CRC16_CLEAR_N |
		    SG_DIG_LOCAL_DUPLEX_STATUS | SG_DIG_LOCAL_LINK_STATUS |
		    (2 << SG_DIG_SPEED_STATUS_SHIFT) | SG_DIG_FIBER_MODE |

		txctrl = tr32(MAC_SERDES_CFG);
		tw32_f(MAC_SERDES_CFG, txctrl | MAC_SERDES_CFG_EDGE_SELECT);
		tw32_f(SG_DIG_CTRL, digctrl | SG_DIG_SOFT_RESET);
		tw32_f(SG_DIG_CTRL, digctrl);
		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;

	err = tg3_setup_phy(tp, 1);

	if (tp->phy_id != PHY_ID_SERDES) {
		/* Clear CRC stats. */
		tg3_readphy(tp, 0x1e, &tmp);
		tg3_writephy(tp, 0x1e, tmp | 0x8000);
		tg3_readphy(tp, 0x14, &tmp);

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)

	/* Disable all unused receive rule slots. */
	tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
		tg3_enable_ints(tp);
5346 /* Called at device open time to get the chip ready for
5347 * packet processing. Invoked with tp->lock held.
/* Returns 0 on success; presumably propagates err from the power-state
 * or reset helpers on failure — return path not visible here, confirm
 * against full source. */
5349 static int tg3_init_hw(struct tg3 *tp)
5353 /* Force the chip into D0. */
5354 err = tg3_set_power_state(tp, 0);
/* With the chip powered, select the proper core clocks and reset the
 * indirect memory window base before the full hardware init. */
5358 tg3_switch_clocks(tp);
5360 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5362 err = tg3_reset_hw(tp);
/* Accumulate a 32-bit hardware counter register into a 64-bit software
 * stat (low/high pair).  If the low word wraps after the add, carry one
 * into the high word. */
5368 #define TG3_STAT_ADD32(PSTAT, REG) \
5369 do { u32 __val = tr32(REG); \
5370 (PSTAT)->low += __val; \
5371 if ((PSTAT)->low < __val) \
5372 (PSTAT)->high += 1; \
/* Fold the MAC TX/RX hardware statistics registers into tp->hw_stats
 * using TG3_STAT_ADD32 (64-bit software accumulation of 32-bit HW
 * counters).  Does nothing while the link carrier is down. */
5375 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5377 struct tg3_hw_stats *sp = tp->hw_stats;
5379 if (!netif_carrier_ok(tp->dev))
/* Transmit-side counters. */
5382 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5383 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5384 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5385 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5386 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5387 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5388 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5389 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5390 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5391 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5392 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5393 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5394 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side counters. */
5396 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5397 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5398 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5399 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5400 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5401 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5402 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5403 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5404 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5405 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5406 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5407 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5408 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5409 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Periodic (HZ/10, see tg3_open) watchdog timer.  Under tp->lock +
 * tp->tx_lock it: re-kicks the interrupt/host-coalescing handshake if a
 * status update was missed, schedules a full reset via reset_task when
 * the write-DMA engine has died, fetches stats on 5705/5750, polls link
 * state once per second, and sends the ASF firmware heartbeat.
 * Re-arms itself at the end. */
5412 static void tg3_timer(unsigned long __opaque)
5414 struct tg3 *tp = (struct tg3 *) __opaque;
5415 unsigned long flags;
5417 spin_lock_irqsave(&tp->lock, flags);
5418 spin_lock(&tp->tx_lock);
5420 /* All of this garbage is because when using non-tagged
5421 * IRQ status the mailbox/status_block protocol the chip
5422 * uses with the cpu is race prone.
5424 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5425 tw32(GRC_LOCAL_CTRL,
5426 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5428 tw32(HOSTCC_MODE, tp->coalesce_mode |
5429 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write-DMA engine stopped: chip is wedged; hand off to the reset
 * workqueue task (drops both locks first, does not re-arm here —
 * presumably reset_task restarts the timer, confirm in full source). */
5432 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5433 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5434 spin_unlock(&tp->tx_lock);
5435 spin_unlock_irqrestore(&tp->lock, flags);
5436 schedule_work(&tp->reset_task);
5440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5442 tg3_periodic_fetch_stats(tp);
5444 /* This part only runs once per second. */
5445 if (!--tp->timer_counter) {
5446 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5450 mac_stat = tr32(MAC_STATUS);
5453 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5454 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5456 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5460 tg3_setup_phy(tp, 0);
5461 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5462 u32 mac_stat = tr32(MAC_STATUS);
5465 if (netif_carrier_ok(tp->dev) &&
5466 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5469 if (! netif_carrier_ok(tp->dev) &&
5470 (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5476 ~MAC_MODE_PORT_MODE_MASK));
5478 tw32_f(MAC_MODE, tp->mac_mode);
5480 tg3_setup_phy(tp, 0);
5484 tp->timer_counter = tp->timer_multiplier;
5487 /* Heartbeat is only sent once every 120 seconds. */
5488 if (!--tp->asf_counter) {
5489 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
/* Tell the ASF management firmware the driver is alive via the
 * NIC SRAM firmware-command mailbox, then ring the RX CPU event. */
5492 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5493 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5494 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5495 val = tr32(GRC_RX_CPU_EVENT);
5497 tw32(GRC_RX_CPU_EVENT, val);
5499 tp->asf_counter = tp->asf_multiplier;
5502 spin_unlock(&tp->tx_lock);
5503 spin_unlock_irqrestore(&tp->lock, flags);
/* Re-arm for the next tick. */
5505 tp->timer.expires = jiffies + tp->timer_offset;
5506 add_timer(&tp->timer);
/* net_device open handler: disables interrupts, allocates the
 * DMA-consistent rings/status blocks, requests the (shared) IRQ,
 * initializes the hardware, starts the watchdog timer (10 ticks/sec;
 * ASF heartbeat every 120 s), then enables interrupts and the TX queue.
 * On init failure the IRQ and consistent memory are released again. */
5509 static int tg3_open(struct net_device *dev)
5511 struct tg3 *tp = netdev_priv(dev);
5514 spin_lock_irq(&tp->lock);
5515 spin_lock(&tp->tx_lock);
5517 tg3_disable_ints(tp);
5518 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5520 spin_unlock(&tp->tx_lock);
5521 spin_unlock_irq(&tp->lock);
5523 /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5524 * tp->tg3_flags is accurate at that new place.
5526 err = tg3_alloc_consistent(tp);
5530 err = request_irq(dev->irq, tg3_interrupt,
5531 SA_SHIRQ, dev->name, dev);
5534 tg3_free_consistent(tp);
5538 spin_lock_irq(&tp->lock);
5539 spin_lock(&tp->tx_lock);
5541 err = tg3_init_hw(tp);
/* Timer runs 10x per second; link poll once per second
 * (timer_multiplier), ASF heartbeat every 120 seconds. */
5546 tp->timer_offset = HZ / 10;
5547 tp->timer_counter = tp->timer_multiplier = 10;
5548 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5550 init_timer(&tp->timer);
5551 tp->timer.expires = jiffies + tp->timer_offset;
5552 tp->timer.data = (unsigned long) tp;
5553 tp->timer.function = tg3_timer;
5554 add_timer(&tp->timer);
5556 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5559 spin_unlock(&tp->tx_lock);
5560 spin_unlock_irq(&tp->lock);
/* Error path: undo IRQ registration and ring allocation. */
5563 free_irq(dev->irq, dev);
5564 tg3_free_consistent(tp);
5568 spin_lock_irq(&tp->lock);
5569 spin_lock(&tp->tx_lock);
5571 tg3_enable_ints(tp);
5573 spin_unlock(&tp->tx_lock);
5574 spin_unlock_irq(&tp->lock);
5576 netif_start_queue(dev);
/* Debug helper: dump PCI status, the mode/status registers of every
 * major chip block (MAC, send/receive data paths, buffer manager, DMA
 * engines, host coalescing, GRC), the on-chip RCBs and status block,
 * and the NIC-side TX/RX buffer descriptors to the kernel log.
 * Deliberately non-static (note the commented-out "static") so it can
 * be called from a debugger; not used in normal operation. */
5582 /*static*/ void tg3_dump_state(struct tg3 *tp)
5584 u32 val32, val32_2, val32_3, val32_4, val32_5;
5588 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5589 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5590 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5594 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5595 tr32(MAC_MODE), tr32(MAC_STATUS));
5596 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5597 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5598 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5599 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5600 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5601 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5603 /* Send data initiator control block */
5604 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5605 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5606 printk(" SNDDATAI_STATSCTRL[%08x]\n",
5607 tr32(SNDDATAI_STATSCTRL));
5609 /* Send data completion control block */
5610 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5612 /* Send BD ring selector block */
5613 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5614 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5616 /* Send BD initiator control block */
5617 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5618 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5620 /* Send BD completion control block */
5621 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5623 /* Receive list placement control block */
5624 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5625 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5626 printk(" RCVLPC_STATSCTRL[%08x]\n",
5627 tr32(RCVLPC_STATSCTRL));
5629 /* Receive data and receive BD initiator control block */
5630 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5631 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5633 /* Receive data completion control block */
5634 printk("DEBUG: RCVDCC_MODE[%08x]\n",
5637 /* Receive BD initiator control block */
5638 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5639 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5641 /* Receive BD completion control block */
5642 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5643 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5645 /* Receive list selector control block */
5646 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5647 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5649 /* Mbuf cluster free block */
5650 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5651 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5653 /* Host coalescing control block */
5654 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5655 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5656 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5657 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5658 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5659 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5660 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5661 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5662 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5663 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5664 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5665 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5667 /* Memory arbiter control block */
5668 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5669 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5671 /* Buffer manager control block */
5672 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5673 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5674 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5675 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5676 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5677 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5678 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5679 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5681 /* Read DMA control block */
5682 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5683 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5685 /* Write DMA control block */
5686 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5687 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5689 /* DMA completion block */
5690 printk("DEBUG: DMAC_MODE[%08x]\n",
5694 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5695 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5696 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5697 tr32(GRC_LOCAL_CTRL));
/* Receive BD ring control blocks (jumbo/standard/mini), four 32-bit
 * words each. */
5700 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5701 tr32(RCVDBDI_JUMBO_BD + 0x0),
5702 tr32(RCVDBDI_JUMBO_BD + 0x4),
5703 tr32(RCVDBDI_JUMBO_BD + 0x8),
5704 tr32(RCVDBDI_JUMBO_BD + 0xc));
5705 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5706 tr32(RCVDBDI_STD_BD + 0x0),
5707 tr32(RCVDBDI_STD_BD + 0x4),
5708 tr32(RCVDBDI_STD_BD + 0x8),
5709 tr32(RCVDBDI_STD_BD + 0xc));
5710 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5711 tr32(RCVDBDI_MINI_BD + 0x0),
5712 tr32(RCVDBDI_MINI_BD + 0x4),
5713 tr32(RCVDBDI_MINI_BD + 0x8),
5714 tr32(RCVDBDI_MINI_BD + 0xc));
/* On-chip SRAM copies of the send RCB, receive-return RCB, and the
 * status block. */
5716 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5717 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5718 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5719 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5720 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5721 val32, val32_2, val32_3, val32_4);
5723 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5724 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5725 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5726 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5727 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5728 val32, val32_2, val32_3, val32_4);
5730 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5731 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5732 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5733 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5734 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5735 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5736 val32, val32_2, val32_3, val32_4, val32_5);
5738 /* SW status block */
5739 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5740 tp->hw_status->status,
5741 tp->hw_status->status_tag,
5742 tp->hw_status->rx_jumbo_consumer,
5743 tp->hw_status->rx_consumer,
5744 tp->hw_status->rx_mini_consumer,
5745 tp->hw_status->idx[0].rx_producer,
5746 tp->hw_status->idx[0].tx_consumer);
5748 /* SW statistics block */
5749 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5750 ((u32 *)tp->hw_stats)[0],
5751 ((u32 *)tp->hw_stats)[1],
5752 ((u32 *)tp->hw_stats)[2],
5753 ((u32 *)tp->hw_stats)[3]);
/* Send-producer mailboxes (host-side and NIC-side). */
5756 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5757 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5758 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5759 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5760 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5762 /* NIC side send descriptors. */
5763 for (i = 0; i < 6; i++) {
5766 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5767 + (i * sizeof(struct tg3_tx_buffer_desc));
5768 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5770 readl(txd + 0x0), readl(txd + 0x4),
5771 readl(txd + 0x8), readl(txd + 0xc));
5774 /* NIC side RX descriptors. */
5775 for (i = 0; i < 6; i++) {
5778 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5779 + (i * sizeof(struct tg3_rx_buffer_desc));
5780 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5782 readl(rxd + 0x0), readl(rxd + 0x4),
5783 readl(rxd + 0x8), readl(rxd + 0xc));
5784 rxd += (4 * sizeof(u32));
5785 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5787 readl(rxd + 0x0), readl(rxd + 0x4),
5788 readl(rxd + 0x8), readl(rxd + 0xc));
/* NIC side jumbo RX descriptors. */
5791 for (i = 0; i < 6; i++) {
5794 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5795 + (i * sizeof(struct tg3_rx_buffer_desc));
5796 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5798 readl(rxd + 0x0), readl(rxd + 0x4),
5799 readl(rxd + 0x8), readl(rxd + 0xc));
5800 rxd += (4 * sizeof(u32));
5801 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5803 readl(rxd + 0x0), readl(rxd + 0x4),
5804 readl(rxd + 0x8), readl(rxd + 0xc));
5809 static struct net_device_stats *tg3_get_stats(struct net_device *);
5810 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop handler: quiesce the TX queue, kill the watchdog
 * timer, disable interrupts and mark the device uninitialized under
 * both locks, release the IRQ, snapshot the final HW counters into the
 * *_prev accumulators (so stats survive the ring teardown), then free
 * the DMA-consistent memory. */
5812 static int tg3_close(struct net_device *dev)
5814 struct tg3 *tp = netdev_priv(dev);
5816 netif_stop_queue(dev);
5818 del_timer_sync(&tp->timer);
5820 spin_lock_irq(&tp->lock);
5821 spin_lock(&tp->tx_lock);
5826 tg3_disable_ints(tp);
5831 ~(TG3_FLAG_INIT_COMPLETE |
5832 TG3_FLAG_GOT_SERDES_FLOWCTL);
5833 netif_carrier_off(tp->dev);
5835 spin_unlock(&tp->tx_lock);
5836 spin_unlock_irq(&tp->lock);
5838 free_irq(dev->irq, dev);
/* Preserve accumulated statistics across close/open cycles. */
5840 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5841 sizeof(tp->net_stats_prev));
5842 memcpy(&tp->estats_prev, tg3_get_estats(tp),
5843 sizeof(tp->estats_prev));
5845 tg3_free_consistent(tp);
/* Collapse a 64-bit (high/low pair) hardware stat into unsigned long.
 * The visible branch handles 32-bit builds; presumably the value is
 * truncated or clamped there — confirm against the elided #if body. */
5850 static inline unsigned long get_stat64(tg3_stat64_t *val)
5854 #if (BITS_PER_LONG == 32)
5857 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the count comes from the PHY's CRC counter (read via the
 * shadow-register sequence at 0x1e/0x14, accumulated in
 * tp->phy_crc_errors since the register clears on read); otherwise the
 * MAC statistics block's rx_fcs_errors counter is used. */
5862 static unsigned long calc_crc_errors(struct tg3 *tp)
5864 struct tg3_hw_stats *hw_stats = tp->hw_stats;
5866 if (tp->phy_id != PHY_ID_SERDES &&
5867 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5868 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5869 unsigned long flags;
5872 spin_lock_irqsave(&tp->lock, flags);
5873 tg3_readphy(tp, 0x1e, &val);
5874 tg3_writephy(tp, 0x1e, val | 0x8000);
5875 tg3_readphy(tp, 0x14, &val);
5876 spin_unlock_irqrestore(&tp->lock, flags);
5878 tp->phy_crc_errors += val;
5880 return tp->phy_crc_errors;
5883 return get_stat64(&hw_stats->rx_fcs_errors);
/* estats->member = snapshot taken at last close (old_estats) plus the
 * live hardware counter.  Relies on local variables estats, old_estats
 * and hw_stats being in scope at the expansion site. */
5886 #define ESTAT_ADD(member) \
5887 estats->member = old_estats->member + \
5888 get_stat64(&hw_stats->member)
/* Build the full ethtool statistics snapshot in tp->estats: for every
 * counter, add the live hardware value to the value accumulated before
 * the last close (tp->estats_prev).  Returns a pointer to tp->estats. */
5890 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5892 struct tg3_ethtool_stats *estats = &tp->estats;
5893 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5894 struct tg3_hw_stats *hw_stats = tp->hw_stats;
5899 ESTAT_ADD(rx_octets);
5900 ESTAT_ADD(rx_fragments);
5901 ESTAT_ADD(rx_ucast_packets);
5902 ESTAT_ADD(rx_mcast_packets);
5903 ESTAT_ADD(rx_bcast_packets);
5904 ESTAT_ADD(rx_fcs_errors);
5905 ESTAT_ADD(rx_align_errors);
5906 ESTAT_ADD(rx_xon_pause_rcvd);
5907 ESTAT_ADD(rx_xoff_pause_rcvd);
5908 ESTAT_ADD(rx_mac_ctrl_rcvd);
5909 ESTAT_ADD(rx_xoff_entered);
5910 ESTAT_ADD(rx_frame_too_long_errors);
5911 ESTAT_ADD(rx_jabbers);
5912 ESTAT_ADD(rx_undersize_packets);
5913 ESTAT_ADD(rx_in_length_errors);
5914 ESTAT_ADD(rx_out_length_errors);
5915 ESTAT_ADD(rx_64_or_less_octet_packets);
5916 ESTAT_ADD(rx_65_to_127_octet_packets);
5917 ESTAT_ADD(rx_128_to_255_octet_packets);
5918 ESTAT_ADD(rx_256_to_511_octet_packets);
5919 ESTAT_ADD(rx_512_to_1023_octet_packets);
5920 ESTAT_ADD(rx_1024_to_1522_octet_packets);
5921 ESTAT_ADD(rx_1523_to_2047_octet_packets);
5922 ESTAT_ADD(rx_2048_to_4095_octet_packets);
5923 ESTAT_ADD(rx_4096_to_8191_octet_packets);
5924 ESTAT_ADD(rx_8192_to_9022_octet_packets);
5926 ESTAT_ADD(tx_octets);
5927 ESTAT_ADD(tx_collisions);
5928 ESTAT_ADD(tx_xon_sent);
5929 ESTAT_ADD(tx_xoff_sent);
5930 ESTAT_ADD(tx_flow_control);
5931 ESTAT_ADD(tx_mac_errors);
5932 ESTAT_ADD(tx_single_collisions);
5933 ESTAT_ADD(tx_mult_collisions);
5934 ESTAT_ADD(tx_deferred);
5935 ESTAT_ADD(tx_excessive_collisions);
5936 ESTAT_ADD(tx_late_collisions);
5937 ESTAT_ADD(tx_collide_2times);
5938 ESTAT_ADD(tx_collide_3times);
5939 ESTAT_ADD(tx_collide_4times);
5940 ESTAT_ADD(tx_collide_5times);
5941 ESTAT_ADD(tx_collide_6times);
5942 ESTAT_ADD(tx_collide_7times);
5943 ESTAT_ADD(tx_collide_8times);
5944 ESTAT_ADD(tx_collide_9times);
5945 ESTAT_ADD(tx_collide_10times);
5946 ESTAT_ADD(tx_collide_11times);
5947 ESTAT_ADD(tx_collide_12times);
5948 ESTAT_ADD(tx_collide_13times);
5949 ESTAT_ADD(tx_collide_14times);
5950 ESTAT_ADD(tx_collide_15times);
5951 ESTAT_ADD(tx_ucast_packets);
5952 ESTAT_ADD(tx_mcast_packets);
5953 ESTAT_ADD(tx_bcast_packets);
5954 ESTAT_ADD(tx_carrier_sense_errors);
5955 ESTAT_ADD(tx_discards);
5956 ESTAT_ADD(tx_errors);
5958 ESTAT_ADD(dma_writeq_full);
5959 ESTAT_ADD(dma_write_prioq_full);
5960 ESTAT_ADD(rxbds_empty);
5961 ESTAT_ADD(rx_discards);
5962 ESTAT_ADD(rx_errors);
5963 ESTAT_ADD(rx_threshold_hit);
5965 ESTAT_ADD(dma_readq_full);
5966 ESTAT_ADD(dma_read_prioq_full);
5967 ESTAT_ADD(tx_comp_queue_full);
5969 ESTAT_ADD(ring_set_send_prod_index);
5970 ESTAT_ADD(ring_status_update);
5971 ESTAT_ADD(nic_irqs);
5972 ESTAT_ADD(nic_avoided_irqs);
5973 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device get_stats handler: map the Tigon3 hardware statistics
 * block onto struct net_device_stats, adding each value to the total
 * accumulated before the last close (tp->net_stats_prev).  CRC errors
 * go through calc_crc_errors() which handles the 5700/5701 PHY
 * counter quirk. */
5978 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
5980 struct tg3 *tp = netdev_priv(dev);
5981 struct net_device_stats *stats = &tp->net_stats;
5982 struct net_device_stats *old_stats = &tp->net_stats_prev;
5983 struct tg3_hw_stats *hw_stats = tp->hw_stats;
5988 stats->rx_packets = old_stats->rx_packets +
5989 get_stat64(&hw_stats->rx_ucast_packets) +
5990 get_stat64(&hw_stats->rx_mcast_packets) +
5991 get_stat64(&hw_stats->rx_bcast_packets);
5993 stats->tx_packets = old_stats->tx_packets +
5994 get_stat64(&hw_stats->tx_ucast_packets) +
5995 get_stat64(&hw_stats->tx_mcast_packets) +
5996 get_stat64(&hw_stats->tx_bcast_packets);
5998 stats->rx_bytes = old_stats->rx_bytes +
5999 get_stat64(&hw_stats->rx_octets);
6000 stats->tx_bytes = old_stats->tx_bytes +
6001 get_stat64(&hw_stats->tx_octets);
6003 stats->rx_errors = old_stats->rx_errors +
6004 get_stat64(&hw_stats->rx_errors) +
6005 get_stat64(&hw_stats->rx_discards);
6006 stats->tx_errors = old_stats->tx_errors +
6007 get_stat64(&hw_stats->tx_errors) +
6008 get_stat64(&hw_stats->tx_mac_errors) +
6009 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6010 get_stat64(&hw_stats->tx_discards);
6012 stats->multicast = old_stats->multicast +
6013 get_stat64(&hw_stats->rx_mcast_packets);
6014 stats->collisions = old_stats->collisions +
6015 get_stat64(&hw_stats->tx_collisions);
6017 stats->rx_length_errors = old_stats->rx_length_errors +
6018 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6019 get_stat64(&hw_stats->rx_undersize_packets);
6021 stats->rx_over_errors = old_stats->rx_over_errors +
6022 get_stat64(&hw_stats->rxbds_empty);
6023 stats->rx_frame_errors = old_stats->rx_frame_errors +
6024 get_stat64(&hw_stats->rx_align_errors);
6025 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6026 get_stat64(&hw_stats->tx_discards);
6027 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6028 get_stat64(&hw_stats->tx_carrier_sense_errors);
6030 stats->rx_crc_errors = old_stats->rx_crc_errors +
6031 calc_crc_errors(tp);
/* Bitwise CRC over buf[0..len): outer loop per byte, inner loop per
 * bit.  Presumably the standard Ethernet CRC-32 used for the multicast
 * hash filter (see __tg3_set_rx_mode) — polynomial and reflection are
 * in the elided body; confirm against full source. */
6036 static inline u32 calc_crc(unsigned char *buf, int len)
6044 for (j = 0; j < len; j++) {
6047 for (k = 0; k < 8; k++) {
/* Set all four 32-bit MAC multicast hash registers to all-ones
 * (accept every multicast frame) or all-zeros (reject all). */
6061 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6063 /* accept or reject all multicast frames */
6064 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6065 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6066 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6067 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Program the MAC RX mode (promiscuous / VLAN-tag stripping) and the
 * 128-bit multicast hash filter from dev->flags and the multicast
 * list.  Caller must hold tp->lock (see tg3_set_rx_mode). */
6070 static void __tg3_set_rx_mode(struct net_device *dev)
6072 struct tg3 *tp = netdev_priv(dev);
6075 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6076 RX_MODE_KEEP_VLAN_TAG);
6078 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6081 #if TG3_VLAN_TAG_USED
6083 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6084 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6086 /* By definition, VLAN is disabled always in this
6089 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6090 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6093 if (dev->flags & IFF_PROMISC) {
6094 /* Promiscuous mode. */
6095 rx_mode |= RX_MODE_PROMISC;
6096 } else if (dev->flags & IFF_ALLMULTI) {
6097 /* Accept all multicast. */
6098 tg3_set_multi (tp, 1);
6099 } else if (dev->mc_count < 1) {
6100 /* Reject all multicast. */
6101 tg3_set_multi (tp, 0);
6103 /* Accept one or more multicast(s). */
6104 struct dev_mc_list *mclist;
6106 u32 mc_filter[4] = { 0, };
/* Hash each multicast address: CRC of the MAC address selects one
 * of 128 filter bits; bits 6:5 pick the register, low 5 bits the
 * bit within it. */
6111 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6112 i++, mclist = mclist->next) {
6114 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6116 regidx = (bit & 0x60) >> 5;
6118 mc_filter[regidx] |= (1 << bit);
6121 tw32(MAC_HASH_REG_0, mc_filter[0]);
6122 tw32(MAC_HASH_REG_1, mc_filter[1]);
6123 tw32(MAC_HASH_REG_2, mc_filter[2]);
6124 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the RX mode register when something changed. */
6127 if (rx_mode != tp->rx_mode) {
6128 tp->rx_mode = rx_mode;
6129 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_multicast_list handler: take tp->lock and delegate
 * to __tg3_set_rx_mode. */
6134 static void tg3_set_rx_mode(struct net_device *dev)
6136 struct tg3 *tp = netdev_priv(dev);
6138 spin_lock_irq(&tp->lock);
6139 __tg3_set_rx_mode(dev);
6140 spin_unlock_irq(&tp->lock);
/* Size of the ethtool register-dump buffer (fixed 32 KiB window). */
6143 #define TG3_REGDUMP_LEN (32 * 1024)
6145 static int tg3_get_regs_len(struct net_device *dev)
6147 return TG3_REGDUMP_LEN;
/* ethtool get_regs handler: zero the 32 KiB output buffer, then copy
 * selected register ranges (placed at their native offsets within the
 * buffer) under both locks.  The NVRAM range is included only when the
 * chip actually has NVRAM. */
6150 static void tg3_get_regs(struct net_device *dev,
6151 struct ethtool_regs *regs, void *_p)
6154 struct tg3 *tp = netdev_priv(dev);
6160 memset(p, 0, TG3_REGDUMP_LEN);
6162 spin_lock_irq(&tp->lock);
6163 spin_lock(&tp->tx_lock);
/* GET_REG32_LOOP(base, len): copy len bytes of registers starting at
 * base into the buffer at offset base.  GET_REG32_1: one register. */
6165 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
6166 #define GET_REG32_LOOP(base,len) \
6167 do { p = (u32 *)(orig_p + (base)); \
6168 for (i = 0; i < len; i += 4) \
6169 __GET_REG32((base) + i); \
6171 #define GET_REG32_1(reg) \
6172 do { p = (u32 *)(orig_p + (reg)); \
6173 __GET_REG32((reg)); \
6176 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6177 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6178 GET_REG32_LOOP(MAC_MODE, 0x4f0);
6179 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6180 GET_REG32_1(SNDDATAC_MODE);
6181 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6182 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6183 GET_REG32_1(SNDBDC_MODE);
6184 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6185 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6186 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6187 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6188 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6189 GET_REG32_1(RCVDCC_MODE);
6190 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6191 GET_REG32_LOOP(RCVCC_MODE, 0x14);
6192 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6193 GET_REG32_1(MBFREE_MODE);
6194 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6195 GET_REG32_LOOP(MEMARB_MODE, 0x10);
6196 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6197 GET_REG32_LOOP(RDMAC_MODE, 0x08);
6198 GET_REG32_LOOP(WDMAC_MODE, 0x08);
6199 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6200 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6201 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6202 GET_REG32_LOOP(FTQ_RESET, 0x120);
6203 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6204 GET_REG32_1(DMAC_MODE);
6205 GET_REG32_LOOP(GRC_MODE, 0x4c);
6206 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6207 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6210 #undef GET_REG32_LOOP
6213 spin_unlock(&tp->tx_lock);
6214 spin_unlock_irq(&tp->lock);
/* ethtool get_eeprom_len handler: report the EEPROM chip size. */
6217 static int tg3_get_eeprom_len(struct net_device *dev)
6219 return EEPROM_CHIP_SIZE;
6222 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6223 u32 offset, u32 *val);
/* ethtool get_eeprom handler: read eeprom->len bytes starting at
 * eeprom->offset.  The NVRAM is accessed in 32-bit words, so the read
 * is split into an unaligned head, whole aligned words, and an
 * unaligned tail; eeprom->len is advanced as data is copied out.
 * Returns 0 on success or the error from the NVRAM read helper. */
6224 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6226 struct tg3 *tp = netdev_priv(dev);
6229 u32 i, offset, len, val, b_offset, b_count;
6231 offset = eeprom->offset;
6235 ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6238 eeprom->magic = swab32(eeprom->magic);
6241 /* adjustments to start on required 4 byte boundary */
6242 b_offset = offset & 3;
6243 b_count = 4 - b_offset;
6244 if (b_count > len) {
6245 /* i.e. offset=1 len=2 */
6248 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6251 memcpy(data, ((char*)&val) + b_offset, b_count);
6254 eeprom->len += b_count;
6257 /* read bytes upto the last 4 byte boundary */
6258 pd = &data[eeprom->len];
6259 for (i = 0; i < (len - (len & 3)); i += 4) {
6260 ret = tg3_nvram_read_using_eeprom(tp, offset + i,
6270 /* read last bytes not ending on 4 byte boundary */
6271 pd = &data[eeprom->len];
6273 b_offset = offset + len - b_count;
6274 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6277 memcpy(pd, ((char*)&val), b_count);
6278 eeprom->len += b_count;
/* ethtool get_settings handler: report supported/advertised link
 * modes, active speed/duplex and autoneg state from tp->link_config.
 * Refuses while the device is uninitialized or the PHY is in low-power
 * mode.  SerDes boards report FIBRE-only; copper adds the 10/100
 * (and, unless 10_100_ONLY, gigabit) modes. */
6283 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6285 struct tg3 *tp = netdev_priv(dev);
6287 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6288 tp->link_config.phy_is_low_power)
6291 cmd->supported = (SUPPORTED_Autoneg);
6293 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6294 cmd->supported |= (SUPPORTED_1000baseT_Half |
6295 SUPPORTED_1000baseT_Full);
6297 if (tp->phy_id != PHY_ID_SERDES)
6298 cmd->supported |= (SUPPORTED_100baseT_Half |
6299 SUPPORTED_100baseT_Full |
6300 SUPPORTED_10baseT_Half |
6301 SUPPORTED_10baseT_Full |
6304 cmd->supported |= SUPPORTED_FIBRE;
6306 cmd->advertising = tp->link_config.advertising;
6307 cmd->speed = tp->link_config.active_speed;
6308 cmd->duplex = tp->link_config.active_duplex;
6310 cmd->phy_address = PHY_ADDR;
6311 cmd->transceiver = 0;
6312 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings handler: validate the request (SerDes boards
 * may only advertise gigabit/autoneg modes), store the new autoneg /
 * advertising / forced speed+duplex in tp->link_config under both
 * locks, and renegotiate via tg3_setup_phy. */
6318 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6320 struct tg3 *tp = netdev_priv(dev);
6322 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6323 tp->link_config.phy_is_low_power)
6326 if (tp->phy_id == PHY_ID_SERDES) {
6327 /* These are the only valid advertisement bits allowed. */
6328 if (cmd->autoneg == AUTONEG_ENABLE &&
6329 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6330 ADVERTISED_1000baseT_Full |
6331 ADVERTISED_Autoneg |
6336 spin_lock_irq(&tp->lock);
6337 spin_lock(&tp->tx_lock);
6339 tp->link_config.autoneg = cmd->autoneg;
6340 if (cmd->autoneg == AUTONEG_ENABLE) {
6341 tp->link_config.advertising = cmd->advertising;
6342 tp->link_config.speed = SPEED_INVALID;
6343 tp->link_config.duplex = DUPLEX_INVALID;
6345 tp->link_config.advertising = 0;
6346 tp->link_config.speed = cmd->speed;
6347 tp->link_config.duplex = cmd->duplex;
6350 tg3_setup_phy(tp, 1);
6351 spin_unlock(&tp->tx_lock);
6352 spin_unlock_irq(&tp->lock);
/* ethtool get_drvinfo handler: driver name, version, and PCI bus id. */
6357 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6359 struct tg3 *tp = netdev_priv(dev);
6361 strcpy(info->driver, DRV_MODULE_NAME);
6362 strcpy(info->version, DRV_MODULE_VERSION);
6363 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol handler: only magic-packet wake is supported;
 * report whether it is currently enabled.  No SecureOn password. */
6366 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6368 struct tg3 *tp = netdev_priv(dev);
6370 wol->supported = WAKE_MAGIC;
6372 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6373 wol->wolopts = WAKE_MAGIC;
6374 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol handler: accept only WAKE_MAGIC (and only if the
 * SerDes hardware is WOL-capable), then set/clear the WOL flag under
 * tp->lock. */
6377 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6379 struct tg3 *tp = netdev_priv(dev);
6381 if (wol->wolopts & ~WAKE_MAGIC)
6383 if ((wol->wolopts & WAKE_MAGIC) &&
6384 tp->phy_id == PHY_ID_SERDES &&
6385 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6388 spin_lock_irq(&tp->lock);
6389 if (wol->wolopts & WAKE_MAGIC)
6390 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6392 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6393 spin_unlock_irq(&tp->lock);
/* ethtool get_msglevel handler: return the driver's message mask. */
6398 static u32 tg3_get_msglevel(struct net_device *dev)
6400 struct tg3 *tp = netdev_priv(dev);
6401 return tp->msg_enable;
/* ethtool set_msglevel handler: store the new message mask. */
6404 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6406 struct tg3 *tp = netdev_priv(dev);
6407 tp->msg_enable = value;
/* ethtool set_tso handler (compiled only with TSO support): reject
 * enabling TSO on chips without the TSO-capable firmware, otherwise
 * defer to the generic helper. */
6410 #if TG3_TSO_SUPPORT != 0
6411 static int tg3_set_tso(struct net_device *dev, u32 value)
6413 struct tg3 *tp = netdev_priv(dev);
6415 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6420 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset handler: if autonegotiation is enabled in BMCR,
 * restart it by setting BMCR_ANRESTART.  BMCR is read twice on
 * purpose — the first read returns latched state on this hardware;
 * the second gives the current value (assumption from the deliberate
 * duplicate; confirm against PHY errata). */
6424 static int tg3_nway_reset(struct net_device *dev)
6426 struct tg3 *tp = netdev_priv(dev);
6430 spin_lock_irq(&tp->lock);
6431 tg3_readphy(tp, MII_BMCR, &bmcr);
6432 tg3_readphy(tp, MII_BMCR, &bmcr);
6434 if (bmcr & BMCR_ANENABLE) {
6435 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6438 spin_unlock_irq(&tp->lock);
/* ethtool get_ringparam handler: fixed hardware ring maxima and the
 * currently configured pending counts.  No mini ring is exposed. */
6443 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6445 struct tg3 *tp = netdev_priv(dev);
6447 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6448 ering->rx_mini_max_pending = 0;
6449 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6451 ering->rx_pending = tp->rx_pending;
6452 ering->rx_mini_pending = 0;
6453 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6454 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam handler: bounds-check the requested counts
 * against the fixed ring sizes, apply them under both locks (chips
 * with the MAX_RXPEND_64 quirk are clamped to 63 RX descriptors),
 * wake the TX queue and restart the interface. */
6457 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6459 struct tg3 *tp = netdev_priv(dev);
6461 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6462 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6463 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6467 spin_lock_irq(&tp->lock);
6468 spin_lock(&tp->tx_lock);
6470 tp->rx_pending = ering->rx_pending;
6472 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6473 tp->rx_pending > 63)
6474 tp->rx_pending = 63;
6475 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6476 tp->tx_pending = ering->tx_pending;
6480 netif_wake_queue(tp->dev);
6481 spin_unlock(&tp->tx_lock);
6482 spin_unlock_irq(&tp->lock);
6483 tg3_netif_start(tp);
/* ethtool get_pauseparam handler: report flow-control autoneg and the
 * RX/TX pause enables from tp->tg3_flags. */
6488 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6490 struct tg3 *tp = netdev_priv(dev);
6492 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6493 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6494 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
/* ethtool set_pauseparam handler: translate the three pause settings
 * into tg3_flags under both locks, then restart the interface so the
 * new flow-control configuration takes effect. */
6497 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6499 struct tg3 *tp = netdev_priv(dev);
6502 spin_lock_irq(&tp->lock);
6503 spin_lock(&tp->tx_lock);
6504 if (epause->autoneg)
6505 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6507 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6508 if (epause->rx_pause)
6509 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6511 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6512 if (epause->tx_pause)
6513 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6515 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6518 spin_unlock(&tp->tx_lock);
6519 spin_unlock_irq(&tp->lock);
6520 tg3_netif_start(tp);
/* ethtool get_rx_csum: nonzero iff RX checksum offload is enabled. */
6525 static u32 tg3_get_rx_csum(struct net_device *dev)
6527 struct tg3 *tp = netdev_priv(dev);
6528 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum: toggle the RX checksum-offload flag under tp->lock.
 * Chips flagged BROKEN_CHECKSUMS (5700 B0, see tg3_get_invariants) cannot
 * have offload enabled.
 */
6531 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6533 struct tg3 *tp = netdev_priv(dev);
6535 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6541 spin_lock_irq(&tp->lock);
6543 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6545 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6546 spin_unlock_irq(&tp->lock);
/* ethtool set_tx_csum: enable/disable NETIF_F_IP_CSUM in dev->features.
 * As with RX, BROKEN_CHECKSUMS chips cannot enable TX offload.
 */
6551 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6553 struct tg3 *tp = netdev_priv(dev);
6555 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6562 dev->features |= NETIF_F_IP_CSUM;
6564 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool get_stats_count: number of u64 statistics exported to userland. */
6569 static int tg3_get_stats_count (struct net_device *dev)
6571 return TG3_NUM_STATS;
/* ethtool get_strings: copy the statistics key names into the userspace
 * buffer for the requested string set.
 *
 * Fix: the memcpy() source had been corrupted by a character-encoding
 * error -- the two bytes "&e" of "&ethtool_stats_keys" were mangled into
 * the single glyph U+00F0 ("ð", the HTML entity &eth;), leaving the
 * undeclared identifier "ðtool_stats_keys". Restore the address-of
 * expression on the real ethtool_stats_keys table.
 */
6574 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6576 switch (stringset) {
6578 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6581 WARN_ON(1); /* we need a WARN() */
/* ethtool get_ethtool_stats: snapshot the driver's statistics block
 * (tg3_get_estats) into the userspace u64 array.
 *
 * Fix: fetch the private area with netdev_priv(dev) like every other
 * entry point in this file, instead of dereferencing dev->priv directly.
 */
6586 static void tg3_get_ethtool_stats (struct net_device *dev,
6587 struct ethtool_stats *estats, u64 *tmp_stats)
6589 struct tg3 *tp = netdev_priv(dev);
6590 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register reads/writes are serialized with tp->lock; SERDES boards
 * have no MII PHY, so those requests bail out early.
 */
6593 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6595 struct mii_ioctl_data *data = if_mii(ifr);
6596 struct tg3 *tp = netdev_priv(dev);
6601 data->phy_id = PHY_ADDR;
6607 if (tp->phy_id == PHY_ID_SERDES)
6608 break; /* We have no PHY */
6610 spin_lock_irq(&tp->lock);
6611 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6612 spin_unlock_irq(&tp->lock);
6614 data->val_out = mii_regval;
6620 if (tp->phy_id == PHY_ID_SERDES)
6621 break; /* We have no PHY */
/* PHY register writes are privileged. */
6623 if (!capable(CAP_NET_ADMIN))
6626 spin_lock_irq(&tp->lock);
6627 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6628 spin_unlock_irq(&tp->lock);
6639 #if TG3_VLAN_TAG_USED
/* VLAN acceleration: record the new vlan_group and reprogram the RX-mode
 * register so the chip keeps/strips VLAN tags appropriately. Both driver
 * locks are held while the group pointer and RX mode change.
 */
6640 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6642 struct tg3 *tp = netdev_priv(dev);
6644 spin_lock_irq(&tp->lock);
6645 spin_lock(&tp->tx_lock);
6649 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6650 __tg3_set_rx_mode(dev);
6652 spin_unlock(&tp->tx_lock);
6653 spin_unlock_irq(&tp->lock);
/* VLAN acceleration: drop a VID from the vlan_group under both driver
 * locks so the RX path stops delivering to the removed VLAN device.
 */
6656 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6658 struct tg3 *tp = netdev_priv(dev);
6660 spin_lock_irq(&tp->lock);
6661 spin_lock(&tp->tx_lock);
6663 tp->vlgrp->vlan_devices[vid] = NULL;
6664 spin_unlock(&tp->tx_lock);
6665 spin_unlock_irq(&tp->lock);
/* ethtool operations table: wires the handlers above (plus generic
 * ethtool_op_* helpers) into the net_device. TSO entries are only
 * present when the driver is built with TSO support.
 */
6669 static struct ethtool_ops tg3_ethtool_ops = {
6670 .get_settings = tg3_get_settings,
6671 .set_settings = tg3_set_settings,
6672 .get_drvinfo = tg3_get_drvinfo,
6673 .get_regs_len = tg3_get_regs_len,
6674 .get_regs = tg3_get_regs,
6675 .get_wol = tg3_get_wol,
6676 .set_wol = tg3_set_wol,
6677 .get_msglevel = tg3_get_msglevel,
6678 .set_msglevel = tg3_set_msglevel,
6679 .nway_reset = tg3_nway_reset,
6680 .get_link = ethtool_op_get_link,
6681 .get_eeprom_len = tg3_get_eeprom_len,
6682 .get_eeprom = tg3_get_eeprom,
6683 .get_ringparam = tg3_get_ringparam,
6684 .set_ringparam = tg3_set_ringparam,
6685 .get_pauseparam = tg3_get_pauseparam,
6686 .set_pauseparam = tg3_set_pauseparam,
6687 .get_rx_csum = tg3_get_rx_csum,
6688 .set_rx_csum = tg3_set_rx_csum,
6689 .get_tx_csum = ethtool_op_get_tx_csum,
6690 .set_tx_csum = tg3_set_tx_csum,
6691 .get_sg = ethtool_op_get_sg,
6692 .set_sg = ethtool_op_set_sg,
6693 #if TG3_TSO_SUPPORT != 0
6694 .get_tso = ethtool_op_get_tso,
6695 .set_tso = tg3_set_tso,
6697 .get_strings = tg3_get_strings,
6698 .get_stats_count = tg3_get_stats_count,
6699 .get_ethtool_stats = tg3_get_ethtool_stats,
6702 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe and enable NVRAM access: reset the serial-EEPROM state machine,
 * enable auto-SEEPROM in GRC local control, and on non-5700/5701 parts
 * detect a real flash/NVRAM interface via NVRAM_CFG1. Sets/clears
 * TG3_FLAG_NVRAM and TG3_FLAG_NVRAM_BUFFERED accordingly. Sun 5704
 * onboard parts skip NVRAM entirely.
 */
6703 static void __devinit tg3_nvram_init(struct tg3 *tp)
6707 if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6710 tw32_f(GRC_EEPROM_ADDR,
6711 (EEPROM_ADDR_FSM_RESET |
6712 (EEPROM_DEFAULT_CLOCK_PERIOD <<
6713 EEPROM_ADDR_CLKPERD_SHIFT)));
6715 /* XXX schedule_timeout() ... */
6716 for (j = 0; j < 100; j++)
6719 /* Enable seeprom accesses. */
6720 tw32_f(GRC_LOCAL_CTRL,
6721 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM)
6724 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6725 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
/* 5750 gates NVRAM behind an explicit access-enable bit. */
6728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6729 u32 nvaccess = tr32(NVRAM_ACCESS);
6731 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6734 nvcfg1 = tr32(NVRAM_CFG1);
6736 tp->tg3_flags |= TG3_FLAG_NVRAM;
6737 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6738 if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6739 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6741 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6742 tw32(NVRAM_CFG1, nvcfg1);
6745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6746 u32 nvaccess = tr32(NVRAM_ACCESS);
6748 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6751 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine (used when no NVRAM interface was detected). Polls for
 * EEPROM_ADDR_COMPLETE, then fetches the word from GRC_EEPROM_DATA.
 */
6755 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6756 u32 offset, u32 *val)
6761 if (offset > EEPROM_ADDR_ADDR_MASK ||
6765 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6766 EEPROM_ADDR_DEVID_MASK |
6768 tw32(GRC_EEPROM_ADDR,
6770 (0 << EEPROM_ADDR_DEVID_SHIFT) |
6771 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6772 EEPROM_ADDR_ADDR_MASK) |
6773 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Busy-wait for the read state machine to finish. */
6775 for (i = 0; i < 10000; i++) {
6776 tmp = tr32(GRC_EEPROM_ADDR);
6778 if (tmp & EEPROM_ADDR_COMPLETE)
6782 if (!(tmp & EEPROM_ADDR_COMPLETE))
6785 *val = tr32(GRC_EEPROM_DATA);
/* Read one 32-bit word from NVRAM. Falls back to the EEPROM path when no
 * NVRAM interface was detected. Buffered-flash parts need the linear
 * offset remapped into page/offset form before it is written to
 * NVRAM_ADDR. Data is byte-swapped (swab32) on read-out.
 */
6789 static int __devinit tg3_nvram_read(struct tg3 *tp,
6790 u32 offset, u32 *val)
6794 if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6795 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6799 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6800 return tg3_nvram_read_using_eeprom(tp, offset, val);
/* Buffered flash addresses NVRAM by (page, intra-page offset). */
6802 if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6803 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6804 NVRAM_BUFFERED_PAGE_POS) +
6805 (offset % NVRAM_BUFFERED_PAGE_SIZE);
6807 if (offset > NVRAM_ADDR_MSK)
6812 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6813 u32 nvaccess = tr32(NVRAM_ACCESS);
6815 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6818 tw32(NVRAM_ADDR, offset);
6820 NVRAM_CMD_RD | NVRAM_CMD_GO |
6821 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6823 /* Wait for done bit to clear. */
6824 for (i = 0; i < 1000; i++) {
6826 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6828 *val = swab32(tr32(NVRAM_RDDATA));
6833 tg3_nvram_unlock(tp);
6835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6836 u32 nvaccess = tr32(NVRAM_ACCESS);
6838 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* One entry of the PCI-subsystem-ID -> PHY-ID lookup table below. */
6847 struct subsys_tbl_ent {
6848 u16 subsys_vendor, subsys_devid;
/* Known boards, keyed by PCI subsystem vendor/device ID. Used by
 * tg3_phy_probe() as the first source for the PHY type before the
 * hardware and EEPROM are consulted.
 */
6852 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6853 /* Broadcom boards. */
6854 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6855 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6856 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6857 { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES }, /* BCM95700A9 */
6858 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6859 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6860 { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES }, /* BCM95701A7 */
6861 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6862 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6863 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6864 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6867 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6868 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6869 { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES }, /* 3C996SX */
6870 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6871 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6874 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6875 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6876 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6877 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6879 /* Compaq boards. */
6880 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6881 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6882 { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES }, /* CHANGELING */
6883 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6884 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6887 { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
/* Determine the board's PHY type and initial link configuration.
 * Sources, in priority order: (1) the hardware MII_PHYSID1/2 registers,
 * (2) the subsys_id_to_phy_id[] board table, (3) the PHY ID stored in
 * NIC SRAM (EEPROM image). Also derives the LED control mode, ASF and
 * WoL capability flags from the NIC_SRAM_DATA_CFG word, and (for copper
 * PHYs without ASF) kicks off autonegotiation advertising all supported
 * speeds.
 */
6890 static int __devinit tg3_phy_probe(struct tg3 *tp)
6892 u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6893 u32 hw_phy_id, hw_phy_id_masked;
6895 int i, eeprom_signature_found, err;
/* Seed phy_id from the per-board table; may be overridden below. */
6897 tp->phy_id = PHY_ID_INVALID;
6898 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6899 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6900 tp->pdev->subsystem_vendor) &&
6901 (subsys_id_to_phy_id[i].subsys_devid ==
6902 tp->pdev->subsystem_device)) {
6903 tp->phy_id = subsys_id_to_phy_id[i].phy_id;
/* Next, look for a valid EEPROM signature in NIC SRAM and pull the
 * configuration words out of it.
 */
6908 eeprom_phy_id = PHY_ID_INVALID;
6909 eeprom_signature_found = 0;
6910 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6911 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6912 u32 nic_cfg, led_cfg;
6914 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6915 tp->nic_sram_data_cfg = nic_cfg;
6917 eeprom_signature_found = 1;
6919 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
6920 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
6921 eeprom_phy_id = PHY_ID_SERDES;
6925 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
6926 if (nic_phy_id != 0) {
6927 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
6928 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
6930 eeprom_phy_id = (id1 >> 16) << 10;
6931 eeprom_phy_id |= (id2 & 0xfc00) << 16;
6932 eeprom_phy_id |= (id2 & 0x03ff) << 0;
/* LED mode: 5750 (Shasta) keeps extended LED bits in a second config
 * word; older chips encode it directly in nic_cfg.
 */
6936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6937 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
6938 led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
6939 SHASTA_EXT_LED_MODE_MASK);
6941 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
6945 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
6946 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
6949 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
6950 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
6953 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
6954 tp->led_ctrl = LED_CTRL_MODE_MAC;
6957 case SHASTA_EXT_LED_SHARED:
6958 tp->led_ctrl = LED_CTRL_MODE_SHARED;
6959 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6960 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
6961 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
6962 LED_CTRL_MODE_PHY_2);
6965 case SHASTA_EXT_LED_MAC:
6966 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
6969 case SHASTA_EXT_LED_COMBO:
6970 tp->led_ctrl = LED_CTRL_MODE_COMBO;
6971 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
6972 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
6973 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards always use PHY_2 LED mode. */
6978 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
6980 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
6981 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
6983 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
6984 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
6985 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
6986 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
6987 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
6989 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6990 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
6992 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6994 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
6995 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
6998 /* Reading the PHY ID register can conflict with ASF
6999 * firwmare access to the PHY hardware.
7002 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7003 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7005 /* Now read the physical PHY_ID from the chip and verify
7006 * that it is sane. If it doesn't look good, we fall back
7007 * to either the hard-coded table based PHY_ID and failing
7008 * that the value found in the eeprom area.
7010 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7011 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7013 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
7014 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7015 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
7017 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7020 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7021 tp->phy_id = hw_phy_id;
7023 /* phy_id currently holds the value found in the
7024 * subsys_id_to_phy_id[] table or PHY_ID_INVALID
7025 * if a match was not found there.
7027 if (tp->phy_id == PHY_ID_INVALID) {
7028 if (!eeprom_signature_found ||
7029 !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
7031 tp->phy_id = eeprom_phy_id;
/* Copper PHY without ASF: reset it and make sure all supported speeds
 * are being advertised, then restart autonegotiation.
 */
7035 if (tp->phy_id != PHY_ID_SERDES &&
7036 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7037 u32 bmsr, adv_reg, tg3_ctrl;
/* Double-read: BMSR latches link-down state, the second read gives the
 * current status.
 */
7039 tg3_readphy(tp, MII_BMSR, &bmsr);
7040 tg3_readphy(tp, MII_BMSR, &bmsr);
7042 if (bmsr & BMSR_LSTATUS)
7043 goto skip_phy_reset;
7045 err = tg3_phy_reset(tp);
7049 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7050 ADVERTISE_100HALF | ADVERTISE_100FULL |
7051 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7053 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7054 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7055 MII_TG3_CTRL_ADV_1000_FULL);
7056 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7057 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7058 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7059 MII_TG3_CTRL_ENABLE_AS_MASTER);
7062 if (!tg3_copper_is_advertising_all(tp)) {
7063 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7065 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7066 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7068 tg3_writephy(tp, MII_BMCR,
7069 BMCR_ANENABLE | BMCR_ANRESTART);
7071 tg3_phy_set_wirespeed(tp);
7073 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7074 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7075 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP reprogrammed after reset. */
7079 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7080 err = tg3_init_5401phy_dsp(tp);
7085 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7086 err = tg3_init_5401phy_dsp(tp);
7089 if (!eeprom_signature_found)
7090 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7092 if (tp->phy_id == PHY_ID_SERDES)
7093 tp->link_config.advertising =
7094 (ADVERTISED_1000baseT_Half |
7095 ADVERTISED_1000baseT_Full |
7096 ADVERTISED_Autoneg |
7098 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7099 tp->link_config.advertising &=
7100 ~(ADVERTISED_1000baseT_Half |
7101 ADVERTISED_1000baseT_Full);
/* Extract the board part number from the PCI VPD image stored in NVRAM
 * at offset 0x100: scan the VPD resource tags for a read-only section
 * and copy the "PN" keyword's value into tp->board_part_number. Falls
 * back to "none" when no part number is present, and to "Sun 5704" on
 * Sun onboard parts (which have no usable NVRAM).
 */
7106 static void __devinit tg3_read_partno(struct tg3 *tp)
7108 unsigned char vpd_data[256];
7111 if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
7112 /* Sun decided not to put the necessary bits in the
7113 * NVRAM of their onboard tg3 parts :(
7115 strcpy(tp->board_part_number, "Sun 5704");
/* Copy 256 bytes of VPD out of NVRAM, one LE word at a time. */
7119 for (i = 0; i < 256; i += 4) {
7122 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7125 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
7126 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
7127 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7128 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7131 /* Now parse and find the part number. */
7132 for (i = 0; i < 256; ) {
7133 unsigned char val = vpd_data[i];
/* 0x82 = identifier-string tag, 0x91 = read/write data tag: skip over
 * these large resources using their 16-bit little-endian length.
 */
7136 if (val == 0x82 || val == 0x91) {
7139 (vpd_data[i + 2] << 8)));
7146 block_end = (i + 3 +
7148 (vpd_data[i + 2] << 8)));
7150 while (i < block_end) {
7151 if (vpd_data[i + 0] == 'P' &&
7152 vpd_data[i + 1] == 'N') {
7153 int partno_len = vpd_data[i + 2];
/* tp->board_part_number can hold at most 24 chars here. */
7155 if (partno_len > 24)
7158 memcpy(tp->board_part_number,
7167 /* Part number not found. */
7172 strcpy(tp->board_part_number, "none");
7175 #ifdef CONFIG_SPARC64
/* SPARC64 only: detect Sun's onboard 5704 by reading the subsystem
 * vendor/device IDs from the OpenFirmware device node, since Sun parts
 * lack the usual NVRAM contents. Returns nonzero for a Sun 5704.
 */
7176 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
7178 struct pci_dev *pdev = tp->pdev;
7179 struct pcidev_cookie *pcp = pdev->sysdata;
7182 int node = pcp->prom_node;
7186 err = prom_getproperty(node, "subsystem-vendor-id",
7187 (char *) &venid, sizeof(venid));
/* prom_getproperty() returns 0 or -1 when the property is absent. */
7188 if (err == 0 || err == -1)
7190 err = prom_getproperty(node, "subsystem-id",
7191 (char *) &devid, sizeof(devid));
7192 if (err == 0 || err == -1)
7195 if (venid == PCI_VENDOR_ID_SUN &&
7196 devid == PCI_DEVICE_ID_TIGON3_5704)
/* One-time probe-path discovery of chip revision, bus mode (PCI/PCI-X/
 * PCI Express), host-chipset workarounds, PHY type and all the
 * tg3_flags/tg3_flags2 feature and bug-workaround bits that the rest of
 * the driver keys off. Must run before any MMIO register access is
 * trusted (it enables indirect-write mode in TG3PCI_MISC_HOST_CTRL).
 */
7203 static int __devinit tg3_get_invariants(struct tg3 *tp)
7206 u32 cacheline_sz_reg;
7207 u32 pci_state_reg, grc_misc_cfg;
7212 #ifdef CONFIG_SPARC64
7213 if (tg3_is_sun_5704(tp))
7214 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
7217 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7218 * reordering to the mailbox registers done by the host
7219 * controller can cause major troubles. We read back from
7220 * every mailbox register write to force the writes to be
7221 * posted to the chip in order.
7223 if (pci_find_device(PCI_VENDOR_ID_INTEL,
7224 PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7225 pci_find_device(PCI_VENDOR_ID_INTEL,
7226 PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7227 pci_find_device(PCI_VENDOR_ID_INTEL,
7228 PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7229 pci_find_device(PCI_VENDOR_ID_INTEL,
7230 PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7231 pci_find_device(PCI_VENDOR_ID_AMD,
7232 PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7233 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7235 /* Force memory write invalidate off. If we leave it on,
7236 * then on 5700_BX chips we have to enable a workaround.
7237 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7238 * to match the cacheline size. The Broadcom driver have this
7239 * workaround but turns MWI off all the times so never uses
7240 * it. This seems to suggest that the workaround is insufficient.
7242 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7243 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7244 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7246 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7247 * has the register indirect write enable bit set before
7248 * we try to access any of the MMIO registers. It is also
7249 * critical that the PCI-X hw workaround situation is decided
7250 * before that as well.
7252 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7255 tp->pci_chip_rev_id = (misc_ctrl_reg >>
7256 MISC_HOST_CTRL_CHIPREV_SHIFT);
7258 /* Initialize misc host control in PCI block. */
7259 tp->misc_host_ctrl |= (misc_ctrl_reg &
7260 MISC_HOST_CTRL_CHIPREV);
7261 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7262 tp->misc_host_ctrl);
7264 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
/* Unpack the four byte-wide fields of the cacheline-size register. */
7267 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
7268 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
7269 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
7270 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
7272 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7273 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
/* 5703 needs a minimum PCI latency timer of 64. */
7275 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7276 tp->pci_lat_timer < 64) {
7277 tp->pci_lat_timer = 64;
7279 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
7280 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
7281 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
7282 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
7284 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7288 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7291 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7292 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7294 /* If this is a 5700 BX chipset, and we are in PCI-X
7295 * mode, enable register write workaround.
7297 * The workaround is to use indirect register accesses
7298 * for all chip writes not to mailbox registers.
7300 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7304 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7306 /* The chip can have it's power management PCI config
7307 * space registers clobbered due to this bug.
7308 * So explicitly force the chip into D0 here.
7310 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7312 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7313 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7314 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7317 /* Also, force SERR#/PERR# in PCI command. */
7318 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7319 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7320 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7324 /* Back to back register writes can cause problems on this chip,
7325 * the workaround is to read back all reg writes except those to
7326 * mailbox regs. See tg3_write_indirect_reg32().
7328 * PCI Express 5750_A0 rev chips need this workaround too.
7330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7331 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7332 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7333 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7335 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7336 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7337 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7338 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7340 /* Chip-specific fixup from Broadcom driver */
7341 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7342 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7343 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7344 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7347 /* Force the chip into D0. */
7348 err = tg3_set_power_state(tp, 0);
7350 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7351 pci_name(tp->pdev));
7355 /* 5700 B0 chips do not support checksumming correctly due
7358 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7359 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7361 /* Pseudo-header checksum is done by hardware logic and not
7362 * the offload processers, so make the chip do the pseudo-
7363 * header checksums on receive. For transmit it is more
7364 * convenient to do the pseudo-header checksum in software
7365 * as Linux does that on transmit for us in all cases.
7367 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7368 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7370 /* Derive initial jumbo mode from MTU assigned in
7371 * ether_setup() via the alloc_etherdev() call
7373 if (tp->dev->mtu > ETH_DATA_LEN)
7374 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7376 /* Determine WakeOnLan speed to use. */
7377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7378 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7379 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7380 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7381 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7383 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7386 /* A few boards don't want Ethernet@WireSpeed phy feature */
7387 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7388 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7389 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7390 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7391 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7393 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7394 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7395 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7396 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7397 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7400 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7401 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7403 /* Only 5701 and later support tagged irq status mode.
7404 * Also, 5788 chips cannot use tagged irq status.
7406 * However, since we are using NAPI avoid tagged irq status
7407 * because the interrupt condition is more difficult to
7408 * fully clear in that mode.
7410 tp->coalesce_mode = 0;
7412 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7413 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7414 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7416 /* Initialize MAC MI mode, polling disabled. */
7417 tw32_f(MAC_MI_MODE, tp->mi_mode);
7420 /* Initialize data/descriptor byte/word swapping. */
7421 val = tr32(GRC_MODE);
7422 val &= GRC_MODE_HOST_STACKUP;
7423 tw32(GRC_MODE, val | tp->grc_mode);
7425 tg3_switch_clocks(tp);
7427 /* Clear this out for sanity. */
7428 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7430 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7432 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7433 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7434 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7436 if (chiprevid == CHIPREV_ID_5701_A0 ||
7437 chiprevid == CHIPREV_ID_5701_B0 ||
7438 chiprevid == CHIPREV_ID_5701_B2 ||
7439 chiprevid == CHIPREV_ID_5701_B5) {
7440 unsigned long sram_base;
7442 /* Write some dummy words into the SRAM status block
7443 * area, see if it reads back correctly. If the return
7444 * value is bad, force enable the PCIX workaround.
7446 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7448 writel(0x00000000, sram_base);
7449 writel(0x00000000, sram_base + 4);
7450 writel(0xffffffff, sram_base + 4);
7451 if (readl(sram_base) != 0x00000000)
7452 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7459 /* Always use host TXDs, it performs better in particular
7460 * with multi-frag packets. The tests below are kept here
7461 * as documentation should we change this decision again
7464 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7467 /* Determine if TX descriptors will reside in
7468 * main memory or in the chip SRAM.
7470 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
7471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7473 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7476 grc_misc_cfg = tr32(GRC_MISC_CFG);
7477 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
/* 5704 CIOBE boards run the chip in split mode. */
7479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7480 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7481 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7482 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7486 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7487 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7488 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7490 /* these are limited to 10/100 only */
7491 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7492 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7493 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7494 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7495 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7496 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7497 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7498 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7499 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
7500 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7502 err = tg3_phy_probe(tp);
7504 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7505 pci_name(tp->pdev), err);
7506 /* ... but do not return immediately ... */
7509 tg3_read_partno(tp);
7511 if (tp->phy_id == PHY_ID_SERDES) {
7512 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
/* 5700 SERDES needs MI interrupts instead of link-change interrupts. */
7514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7515 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7517 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7520 /* 5700 {AX,BX} chips have a broken status block link
7521 * change bit implementation, so we must use the
7522 * status register in those cases.
7524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7525 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7527 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7529 /* The led_ctrl is set during tg3_phy_probe, here we might
7530 * have to force the link status polling mechanism based
7531 * upon subsystem IDs.
7533 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7534 tp->phy_id != PHY_ID_SERDES) {
7535 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7536 TG3_FLAG_USE_LINKCHG_REG);
7539 /* For all SERDES we poll the MAC status register. */
7540 if (tp->phy_id == PHY_ID_SERDES)
7541 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7543 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7545 /* 5700 BX chips need to have their TX producer index mailboxes
7546 * written twice to workaround a bug.
7548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7549 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7551 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7553 /* It seems all chips can get confused if TX buffers
7554 * straddle the 4GB address boundary in some cases.
7556 tp->dev->hard_start_xmit = tg3_start_xmit;
7559 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7560 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7563 /* By default, disable wake-on-lan. User can change this
7564 * using ETHTOOL_SWOL.
7566 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7571 #ifdef CONFIG_SPARC64
/* SPARC64 only: fetch the MAC address from the OpenFirmware
 * "local-mac-address" property of this device's PROM node, when present.
 */
7572 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7574 struct net_device *dev = tp->dev;
7575 struct pci_dev *pdev = tp->pdev;
7576 struct pcidev_cookie *pcp = pdev->sysdata;
7579 int node = pcp->prom_node;
7581 if (prom_getproplen(node, "local-mac-address") == 6) {
7582 prom_getproperty(node, "local-mac-address",
/* SPARC64 only: last-resort MAC address from the machine's IDPROM. */
7590 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7592 struct net_device *dev = tp->dev;
7594 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying in order: the SPARC PROM
 * (if applicable), the NIC SRAM MAC-address mailbox (tagged "HK" =
 * 0x484b in the high word), NVRAM at mac_offset, and finally the live
 * MAC_ADDR_0 registers; falls back to the SPARC IDPROM when the result
 * is not a valid ethernet address.
 *
 * Fix: TG3_FLG2_SUN_5704 is a tg3_flags2 bit -- it is set via
 * tp->tg3_flags2 in tg3_get_invariants() and tested via tp->tg3_flags2
 * in tg3_nvram_init()/tg3_nvram_read() -- but this function tested it
 * against tp->tg3_flags, so the Sun-5704 check here never keyed off the
 * intended bit. Test tg3_flags2 in both places.
 */
7599 static int __devinit tg3_get_device_address(struct tg3 *tp)
7601 struct net_device *dev = tp->dev;
7602 u32 hi, lo, mac_offset;
7604 #ifdef CONFIG_SPARC64
7605 if (!tg3_get_macaddr_sparc(tp))
/* Second MAC of a dual-MAC 5704 stores its address at a different
 * NVRAM offset; Sun onboard parts have no usable NVRAM.
 */
7610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7611 !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
7612 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7614 if (tg3_nvram_lock(tp))
7615 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7617 tg3_nvram_unlock(tp);
7620 /* First try to get it from MAC address mailbox. */
7621 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
7622 if ((hi >> 16) == 0x484b) {
7623 dev->dev_addr[0] = (hi >> 8) & 0xff;
7624 dev->dev_addr[1] = (hi >> 0) & 0xff;
7626 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7627 dev->dev_addr[2] = (lo >> 24) & 0xff;
7628 dev->dev_addr[3] = (lo >> 16) & 0xff;
7629 dev->dev_addr[4] = (lo >> 8) & 0xff;
7630 dev->dev_addr[5] = (lo >> 0) & 0xff;
7632 /* Next, try NVRAM. */
7633 else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704) &&
7634 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7635 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7636 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7637 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7638 dev->dev_addr[2] = ((lo >> 0) & 0xff);
7639 dev->dev_addr[3] = ((lo >> 8) & 0xff);
7640 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7641 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7643 /* Finally just fetch it out of the MAC control regs. */
7645 hi = tr32(MAC_ADDR_0_HIGH);
7646 lo = tr32(MAC_ADDR_0_LOW);
7648 dev->dev_addr[5] = lo & 0xff;
7649 dev->dev_addr[4] = (lo >> 8) & 0xff;
7650 dev->dev_addr[3] = (lo >> 16) & 0xff;
7651 dev->dev_addr[2] = (lo >> 24) & 0xff;
7652 dev->dev_addr[1] = hi & 0xff;
7653 dev->dev_addr[0] = (hi >> 8) & 0xff;
7656 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7657 #ifdef CONFIG_SPARC64
7658 if (!tg3_get_default_macaddr_sparc(tp))
/* Run one DMA test transaction: build an internal buffer descriptor
 * pointing at buf/buf_dma, write it into the NIC SRAM descriptor pool
 * via the PCI memory window, kick the read- or write-DMA FTQ (per
 * to_device), and poll the completion FIFOs for our descriptor.
 * Used by tg3_test_dma() to validate DMA boundary settings at probe.
 */
7666 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7668 struct tg3_internal_buffer_desc test_desc;
7672 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA engines and completion FIFOs before the test. */
7674 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7675 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7676 tw32(RDMAC_STATUS, 0);
7677 tw32(WDMAC_STATUS, 0);
7679 tw32(BUFMGR_MODE, 0);
7682 test_desc.addr_hi = ((u64) buf_dma) >> 32;
7683 test_desc.addr_lo = buf_dma & 0xffffffff;
7684 test_desc.nic_mbuf = 0x00002100;
7685 test_desc.len = size;
7688 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
7689 * the *second* time the tg3 driver was getting loaded after an
7692 * Broadcom tells me:
7693 * ...the DMA engine is connected to the GRC block and a DMA
7694 * reset may affect the GRC block in some unpredictable way...
7695 * The behavior of resets to individual blocks has not been tested.
7697 * Broadcom noted the GRC reset will also reset all sub-components.
/* cqid_sqid selects the completion/submission queues for each engine. */
7700 test_desc.cqid_sqid = (13 << 8) | 2;
7702 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7705 test_desc.cqid_sqid = (16 << 8) | 7;
7707 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7710 test_desc.flags = 0x00000005;
/* Copy the descriptor into NIC SRAM through the PCI memory window. */
7712 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7715 val = *(((u32 *)&test_desc) + i);
7716 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7717 sram_dma_descs + (i * sizeof(u32)));
7718 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7720 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7723 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7725 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll the matching completion FIFO for our descriptor address. */
7729 for (i = 0; i < 40; i++) {
7733 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7735 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7736 if ((val & 0xffff) == sram_dma_descs) {
7747 #define TEST_BUFFER_SIZE 0x400
7749 static int __devinit tg3_test_dma(struct tg3 *tp)
7755 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7761 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7762 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7768 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7771 cacheline_size = 1024;
7773 cacheline_size = (int) byte * 4;
7775 switch (cacheline_size) {
7780 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7781 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7783 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7785 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7787 ~(DMA_RWCTRL_PCI_WRITE_CMD);
7789 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7794 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7795 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7797 DMA_RWCTRL_WRITE_BNDRY_256;
7798 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7800 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7805 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7806 tp->dma_rwctrl |= 0x001f0000;
7807 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7810 tp->dma_rwctrl |= 0x003f0000;
7812 tp->dma_rwctrl |= 0x003f000f;
7814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7816 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7818 if (ccval == 0x6 || ccval == 0x7)
7819 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7821 /* Set bit 23 to renable PCIX hw bug fix */
7822 tp->dma_rwctrl |= 0x009f0000;
7824 tp->dma_rwctrl |= 0x001b000f;
7828 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7829 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7830 tp->dma_rwctrl &= 0xfffffff0;
7832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7834 /* Remove this if it causes problems for some boards. */
7835 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7837 /* On 5700/5701 chips, we need to set this bit.
7838 * Otherwise the chip will issue cacheline transactions
7839 * to streamable DMA memory with not all the byte
7840 * enables turned on. This is an error on several
7841 * RISC PCI controllers, in particular sparc64.
7843 * On 5703/5704 chips, this bit has been reassigned
7844 * a different meaning. In particular, it is used
7845 * on those chips to enable a PCI-X workaround.
7847 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7850 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7853 /* Unneeded, already done by tg3_get_invariants. */
7854 tg3_switch_clocks(tp);
7858 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7859 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7865 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7868 /* Send the buffer to the chip. */
7869 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7871 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7876 /* validate data reached card RAM correctly. */
7877 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7879 tg3_read_mem(tp, 0x2100 + (i*4), &val);
7880 if (le32_to_cpu(val) != p[i]) {
7881 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
7882 /* ret = -ENODEV here? */
7887 /* Now read it back. */
7888 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7890 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
7896 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7900 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7901 DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7902 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7903 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7906 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
7912 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
7920 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
7925 static void __devinit tg3_init_link_config(struct tg3 *tp)
7927 tp->link_config.advertising =
7928 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
7929 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
7930 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
7931 ADVERTISED_Autoneg | ADVERTISED_MII);
7932 tp->link_config.speed = SPEED_INVALID;
7933 tp->link_config.duplex = DUPLEX_INVALID;
7934 tp->link_config.autoneg = AUTONEG_ENABLE;
7935 netif_carrier_off(tp->dev);
7936 tp->link_config.active_speed = SPEED_INVALID;
7937 tp->link_config.active_duplex = DUPLEX_INVALID;
7938 tp->link_config.phy_is_low_power = 0;
7939 tp->link_config.orig_speed = SPEED_INVALID;
7940 tp->link_config.orig_duplex = DUPLEX_INVALID;
7941 tp->link_config.orig_autoneg = AUTONEG_INVALID;
7944 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
7946 tp->bufmgr_config.mbuf_read_dma_low_water =
7947 DEFAULT_MB_RDMA_LOW_WATER;
7948 tp->bufmgr_config.mbuf_mac_rx_low_water =
7949 DEFAULT_MB_MACRX_LOW_WATER;
7950 tp->bufmgr_config.mbuf_high_water =
7951 DEFAULT_MB_HIGH_WATER;
7953 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
7954 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
7955 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
7956 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
7957 tp->bufmgr_config.mbuf_high_water_jumbo =
7958 DEFAULT_MB_HIGH_WATER_JUMBO;
7960 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
7961 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
7964 static char * __devinit tg3_phy_string(struct tg3 *tp)
7966 switch (tp->phy_id & PHY_ID_MASK) {
7967 case PHY_ID_BCM5400: return "5400";
7968 case PHY_ID_BCM5401: return "5401";
7969 case PHY_ID_BCM5411: return "5411";
7970 case PHY_ID_BCM5701: return "5701";
7971 case PHY_ID_BCM5703: return "5703";
7972 case PHY_ID_BCM5704: return "5704";
7973 case PHY_ID_BCM5705: return "5705";
7974 case PHY_ID_BCM5750: return "5750";
7975 case PHY_ID_BCM8002: return "8002";
7976 case PHY_ID_SERDES: return "serdes";
7977 default: return "unknown";
7981 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
7983 struct pci_dev *peer;
7984 unsigned int func, devnr = tp->pdev->devfn & ~7;
7986 for (func = 0; func < 8; func++) {
7987 peer = pci_get_slot(tp->pdev->bus, devnr | func);
7988 if (peer && peer != tp->pdev)
7992 if (!peer || peer == tp->pdev)
7996 * We don't need to keep the refcount elevated; there's no way
7997 * to remove one half of this device without removing the other
8004 static int __devinit tg3_init_one(struct pci_dev *pdev,
8005 const struct pci_device_id *ent)
8007 static int tg3_version_printed = 0;
8008 unsigned long tg3reg_base, tg3reg_len;
8009 struct net_device *dev;
8011 int i, err, pci_using_dac, pm_cap;
8013 if (tg3_version_printed++ == 0)
8014 printk(KERN_INFO "%s", version);
8016 err = pci_enable_device(pdev);
8018 printk(KERN_ERR PFX "Cannot enable PCI device, "
8023 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8024 printk(KERN_ERR PFX "Cannot find proper PCI device "
8025 "base address, aborting.\n");
8027 goto err_out_disable_pdev;
8030 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8032 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8034 goto err_out_disable_pdev;
8037 pci_set_master(pdev);
8039 /* Find power-management capability. */
8040 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8042 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8045 goto err_out_free_res;
8048 /* Configure DMA attributes. */
8049 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8052 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8054 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8055 "for consistent allocations\n");
8056 goto err_out_free_res;
8059 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8061 printk(KERN_ERR PFX "No usable DMA configuration, "
8063 goto err_out_free_res;
8068 tg3reg_base = pci_resource_start(pdev, 0);
8069 tg3reg_len = pci_resource_len(pdev, 0);
8071 dev = alloc_etherdev(sizeof(*tp));
8073 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8075 goto err_out_free_res;
8078 SET_MODULE_OWNER(dev);
8079 SET_NETDEV_DEV(dev, &pdev->dev);
8082 dev->features |= NETIF_F_HIGHDMA;
8083 #if TG3_VLAN_TAG_USED
8084 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8085 dev->vlan_rx_register = tg3_vlan_rx_register;
8086 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8089 tp = netdev_priv(dev);
8092 tp->pm_cap = pm_cap;
8093 tp->mac_mode = TG3_DEF_MAC_MODE;
8094 tp->rx_mode = TG3_DEF_RX_MODE;
8095 tp->tx_mode = TG3_DEF_TX_MODE;
8096 tp->mi_mode = MAC_MI_MODE_BASE;
8098 tp->msg_enable = tg3_debug;
8100 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8102 /* The word/byte swap controls here control register access byte
8103 * swapping. DMA data byte swapping is controlled in the GRC_MODE
8106 tp->misc_host_ctrl =
8107 MISC_HOST_CTRL_MASK_PCI_INT |
8108 MISC_HOST_CTRL_WORD_SWAP |
8109 MISC_HOST_CTRL_INDIR_ACCESS |
8110 MISC_HOST_CTRL_PCISTATE_RW;
8112 /* The NONFRM (non-frame) byte/word swap controls take effect
8113 * on descriptor entries, anything which isn't packet data.
8115 * The StrongARM chips on the board (one for tx, one for rx)
8116 * are running in big-endian mode.
8118 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8119 GRC_MODE_WSWAP_NONFRM_DATA);
8121 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8123 spin_lock_init(&tp->lock);
8124 spin_lock_init(&tp->tx_lock);
8125 spin_lock_init(&tp->indirect_lock);
8126 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8128 tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
8129 if (tp->regs == 0UL) {
8130 printk(KERN_ERR PFX "Cannot map device registers, "
8133 goto err_out_free_dev;
8136 tg3_init_link_config(tp);
8138 tg3_init_bufmgr_config(tp);
8140 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8141 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8142 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8144 dev->open = tg3_open;
8145 dev->stop = tg3_close;
8146 dev->get_stats = tg3_get_stats;
8147 dev->set_multicast_list = tg3_set_rx_mode;
8148 dev->set_mac_address = tg3_set_mac_addr;
8149 dev->do_ioctl = tg3_ioctl;
8150 dev->tx_timeout = tg3_tx_timeout;
8151 dev->poll = tg3_poll;
8152 dev->ethtool_ops = &tg3_ethtool_ops;
8154 dev->watchdog_timeo = TG3_TX_TIMEOUT;
8155 dev->change_mtu = tg3_change_mtu;
8156 dev->irq = pdev->irq;
8157 #ifdef CONFIG_NET_POLL_CONTROLLER
8158 dev->poll_controller = tg3_poll_controller;
8161 err = tg3_get_invariants(tp);
8163 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8165 goto err_out_iounmap;
8168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8170 tp->bufmgr_config.mbuf_read_dma_low_water =
8171 DEFAULT_MB_RDMA_LOW_WATER_5705;
8172 tp->bufmgr_config.mbuf_mac_rx_low_water =
8173 DEFAULT_MB_MACRX_LOW_WATER_5705;
8174 tp->bufmgr_config.mbuf_high_water =
8175 DEFAULT_MB_HIGH_WATER_5705;
8178 #if TG3_TSO_SUPPORT != 0
8179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8181 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8182 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8183 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8184 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8186 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8189 /* TSO is off by default, user can enable using ethtool. */
8191 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8192 dev->features |= NETIF_F_TSO;
8197 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8198 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8199 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8200 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8201 tp->rx_pending = 63;
8204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8205 tp->pdev_peer = tg3_find_5704_peer(tp);
8207 err = tg3_get_device_address(tp);
8209 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8211 goto err_out_iounmap;
8215 * Reset chip in case UNDI or EFI driver did not shutdown
8216 * DMA self test will enable WDMAC and we'll see (spurious)
8217 * pending DMA on the PCI bus at that point.
8219 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8220 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8221 pci_save_state(tp->pdev, tp->pci_cfg_state);
8222 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8226 err = tg3_test_dma(tp);
8228 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8229 goto err_out_iounmap;
8232 /* Tigon3 can do ipv4 only... and some chips have buggy
8235 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8236 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8237 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8239 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8241 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8242 dev->features &= ~NETIF_F_HIGHDMA;
8244 err = register_netdev(dev);
8246 printk(KERN_ERR PFX "Cannot register net device, "
8248 goto err_out_iounmap;
8251 pci_set_drvdata(pdev, dev);
8253 /* Now that we have fully setup the chip, save away a snapshot
8254 * of the PCI config space. We need to restore this after
8255 * GRC_MISC_CFG core clock resets and some resume events.
8257 pci_save_state(tp->pdev, tp->pci_cfg_state);
8259 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8261 tp->board_part_number,
8262 tp->pci_chip_rev_id,
8264 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8265 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8266 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8267 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8268 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8269 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8271 for (i = 0; i < 6; i++)
8272 printk("%2.2x%c", dev->dev_addr[i],
8273 i == 5 ? '\n' : ':');
8275 printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
8276 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8279 (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
8280 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8281 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8282 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8283 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8284 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8285 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8286 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8291 iounmap((void *) tp->regs);
8297 pci_release_regions(pdev);
8299 err_out_disable_pdev:
8300 pci_disable_device(pdev);
8301 pci_set_drvdata(pdev, NULL);
8305 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8307 struct net_device *dev = pci_get_drvdata(pdev);
8310 struct tg3 *tp = netdev_priv(dev);
8312 unregister_netdev(dev);
8313 iounmap((void *)tp->regs);
8315 pci_release_regions(pdev);
8316 pci_disable_device(pdev);
8317 pci_set_drvdata(pdev, NULL);
8321 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8323 struct net_device *dev = pci_get_drvdata(pdev);
8324 struct tg3 *tp = netdev_priv(dev);
8327 if (!netif_running(dev))
8332 del_timer_sync(&tp->timer);
8334 spin_lock_irq(&tp->lock);
8335 spin_lock(&tp->tx_lock);
8336 tg3_disable_ints(tp);
8337 spin_unlock(&tp->tx_lock);
8338 spin_unlock_irq(&tp->lock);
8340 netif_device_detach(dev);
8342 spin_lock_irq(&tp->lock);
8343 spin_lock(&tp->tx_lock);
8345 spin_unlock(&tp->tx_lock);
8346 spin_unlock_irq(&tp->lock);
8348 err = tg3_set_power_state(tp, state);
8350 spin_lock_irq(&tp->lock);
8351 spin_lock(&tp->tx_lock);
8355 tp->timer.expires = jiffies + tp->timer_offset;
8356 add_timer(&tp->timer);
8358 spin_unlock(&tp->tx_lock);
8359 spin_unlock_irq(&tp->lock);
8361 netif_device_attach(dev);
8362 tg3_netif_start(tp);
8368 static int tg3_resume(struct pci_dev *pdev)
8370 struct net_device *dev = pci_get_drvdata(pdev);
8371 struct tg3 *tp = netdev_priv(dev);
8374 if (!netif_running(dev))
8377 pci_restore_state(tp->pdev, tp->pci_cfg_state);
8379 err = tg3_set_power_state(tp, 0);
8383 netif_device_attach(dev);
8385 spin_lock_irq(&tp->lock);
8386 spin_lock(&tp->tx_lock);
8390 tp->timer.expires = jiffies + tp->timer_offset;
8391 add_timer(&tp->timer);
8393 tg3_enable_ints(tp);
8395 spin_unlock(&tp->tx_lock);
8396 spin_unlock_irq(&tp->lock);
8398 tg3_netif_start(tp);
8403 static struct pci_driver tg3_driver = {
8404 .name = DRV_MODULE_NAME,
8405 .id_table = tg3_pci_tbl,
8406 .probe = tg3_init_one,
8407 .remove = __devexit_p(tg3_remove_one),
8408 .suspend = tg3_suspend,
8409 .resume = tg3_resume
8412 static int __init tg3_init(void)
8414 return pci_module_init(&tg3_driver);
8417 static void __exit tg3_cleanup(void)
8419 pci_unregister_driver(&tg3_driver);
8422 module_init(tg3_init);
8423 module_exit(tg3_cleanup);