/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 *
 * Firmware is:
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 */

#include <linux/config.h>

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.10"
#define DRV_MODULE_RELDATE      "September 14, 2004"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
         512 : 1024)

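/* The ring sizes are powers of two, so for example
 * '(i + 1) % TG3_TX_RING_SIZE' can be computed as
 * '(i + 1) & (TG3_TX_RING_SIZE - 1)', which is exactly what the
 * NEXT_TX() macro below does.
 */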
#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
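/* TX_RING_GAP() is the slice of the TX ring deliberately left unused
 * (tx_pending may be smaller than the ring size), and TX_BUFFS_AVAIL()
 * is the number of descriptors the driver may still fill, taking
 * producer/consumer wrap-around into account.
 */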
#define TX_RING_GAP(TP) \
        (TG3_TX_RING_SIZE - (TP)->tx_pending)
#define TX_BUFFS_AVAIL(TP)                                              \
        (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
          (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
          (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(tg3_debug, "i");
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
        char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

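/* Basic register accessors.  On chips with the PCI-X target hardware bug
 * (TG3_FLAG_PCIX_TARGET_HWBUG) register writes go indirectly through the
 * TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA config space window, serialized by
 * indirect_lock.  Otherwise plain MMIO is used, with a read-back to flush
 * the posted write where a chip bug or ordering issue requires it.
 */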
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
                unsigned long flags;

                spin_lock_irqsave(&tp->indirect_lock, flags);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                spin_unlock_irqrestore(&tp->indirect_lock, flags);
        } else {
                writel(val, tp->regs + off);
                if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
                        readl(tp->regs + off);
        }
}

static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
                unsigned long flags;

                spin_lock_irqsave(&tp->indirect_lock, flags);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                spin_unlock_irqrestore(&tp->indirect_lock, flags);
        } else {
                void __iomem *dest = tp->regs + off;
                writel(val, dest);
                readl(dest);    /* always flush PCI write */
        }
}

static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

#define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
#define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)               readl(tp->regs + (reg))
#define tr16(reg)               readw(tp->regs + (reg))
#define tr8(reg)                readb(tp->regs + (reg))

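/* Read/write a word of NIC on-board memory through the PCI memory window
 * registers.  The window base is always restored to zero afterwards, and
 * indirect_lock keeps the base/data register pair update atomic.
 */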
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

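/* Interrupts are masked and unmasked by flipping MISC_HOST_CTRL_MASK_PCI_INT
 * and writing the interrupt mailbox; the mailbox read-back flushes the
 * write.  tg3_cond_int() re-raises an interrupt through GRC_LOCAL_CTRL if a
 * status block update is already pending, so no event is lost across an
 * enable.
 */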
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (tp->hw_status->status & SD_STATUS_UPDATED)
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

        tg3_cond_int(tp);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tg3_cond_int(tp);
}

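/* Switch the device to its run-time core clock configuration.  The
 * chip-specific ALTCLK sequence below must be done in steps, with a
 * 40 usec delay after each write.
 */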
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_f(TG3PCI_CLOCK_CTRL,
                               clock_ctrl | CLOCK_CTRL_625_CORE);
                        udelay(40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl |
                     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
                udelay(40);
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl | (CLOCK_CTRL_ALTCLK));
                udelay(40);
        }
        tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
        udelay(40);
}

#define PHY_BUSY_LOOPS  5000

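/* MII management access.  A read or write is issued by building a frame in
 * MAC_MI_COM (PHY address, register, opcode) and then polling the
 * MI_COM_BUSY bit for up to PHY_BUSY_LOOPS iterations.  If the MAC is in
 * auto-poll mode it is temporarily taken out of it around the access.
 */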
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        int loops, ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0xffffffff;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops-- > 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
        }

        ret = -EBUSY;
        if (loops >= 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        int loops, ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops-- > 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
        }

        ret = -EBUSY;
        if (loops >= 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
        tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
        tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                tg3_readphy(tp, 0x16, &tmp32);
                if ((tmp32 & 0x1000) == 0)
                        break;
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

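/* Write the DSP test patterns into each of the four channels and verify
 * them through the DSP read/write port.  *resetp is set when the PHY needs
 * to be reset and the whole sequence retried by the caller.
 */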
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
                        if (tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        } else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
        reg32 &= ~0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

        return err;
}

/* Reset the tigon3 PHY, using the chip specific reset sequence where
 * required, and re-apply the PHY workarounds afterwards.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that
         * support jumbo frames.
         */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
                tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }
        tg3_phy_set_wirespeed(tp);
        return 0;
}

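/* Twiddle the GRC GPIO outputs that control the auxiliary power supply.
 * On 5704 both ports share the supply, so the peer device's state is
 * consulted before switching it; the sequencing below keeps power applied
 * whenever either port has Wake-on-LAN enabled.
 */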
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                tp_peer = pci_get_drvdata(tp->pdev_peer);
                if (!tp_peer)
                        BUG();
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                } else {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT1 |
                              GRC_LCLCTRL_GPIO_OUTPUT2));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1 |
                              GRC_LCLCTRL_GPIO_OUTPUT2));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        }
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);

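/* Move the device into the requested PCI power state (0 through 3).  For
 * the low power states this records and reduces the current link
 * configuration, programs the MAC for Wake-on-LAN when it is enabled,
 * switches the core clocks down, frobs the auxiliary power GPIOs and
 * finally writes the new state into the PCI PM control register.
 */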
static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(100);

                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                               tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        tg3_frob_aux_power(tp);

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       tp->dev->name,
                       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
                       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

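/* Resolve the TX/RX pause (flow control) settings from the local and link
 * partner pause advertisement bits when pause autonegotiation is enabled,
 * then update MAC_RX_MODE/MAC_TX_MODE to match.
 */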
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
        u32 new_tg3_flags = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
                if (local_adv & ADVERTISE_PAUSE_CAP) {
                        if (local_adv & ADVERTISE_PAUSE_ASYM) {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                 TG3_FLAG_TX_PAUSE);
                                else if (remote_adv & LPA_PAUSE_ASYM)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE);
                        } else {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                 TG3_FLAG_TX_PAUSE);
                        }
                } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if ((remote_adv & LPA_PAUSE_CAP) &&
                            (remote_adv & LPA_PAUSE_ASYM))
                                new_tg3_flags |= TG3_FLAG_TX_PAUSE;
                }

                tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
                tp->tg3_flags |= new_tg3_flags;
        } else {
                new_tg3_flags = tp->tg3_flags;
        }

        if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode) {
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode) {
                tw32_f(MAC_TX_MODE, tp->tx_mode);
        }
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
                *speed = SPEED_10;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_10FULL:
                *speed = SPEED_10;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_100HALF:
                *speed = SPEED_100;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_100FULL:
                *speed = SPEED_100;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_1000HALF:
                *speed = SPEED_1000;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_1000FULL:
                *speed = SPEED_1000;
                *duplex = DUPLEX_FULL;
                break;

        default:
                *speed = SPEED_INVALID;
                *duplex = DUPLEX_INVALID;
                break;
        };
}

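/* Program the copper PHY advertisement and BMCR for the current link
 * configuration: a reduced advertisement when entering low power mode,
 * everything the chip supports when autonegotiating with no specific
 * request, or a forced speed/duplex when autoneg is disabled.
 */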
static int tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->link_config.phy_is_low_power) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
                tg3_writephy(tp, MII_TG3_CTRL, 0);

                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                tp->link_config.advertising =
                        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg | ADVERTISED_MII);

                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
                        new_adv |= ADVERTISE_10HALF;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
                        new_adv |= ADVERTISE_10FULL;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
                        new_adv |= ADVERTISE_100HALF;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
                        new_adv |= ADVERTISE_100FULL;
                tg3_writephy(tp, MII_ADVERTISE, new_adv);

                if (tp->link_config.advertising &
                    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
                        new_adv = 0;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);
                }
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = MII_TG3_CTRL_ADV_1000_FULL;
                        else
                                new_adv = MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);

                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        if (tp->link_config.speed == SPEED_100) {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_100FULL;
                                else
                                        new_adv |= ADVERTISE_100HALF;
                        } else {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_10FULL;
                                else
                                        new_adv |= ADVERTISE_10HALF;
                        }
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                }
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= TG3_BMCR_SPEED1000;
                        break;
                };

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                tg3_readphy(tp, MII_BMCR, &orig_bmcr);
                if (bmcr != orig_bmcr) {
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                tg3_readphy(tp, MII_BMSR, &tmp);
                                tg3_readphy(tp, MII_BMSR, &tmp);
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }

        return 0;
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
        int err;

        /* Turn off tap power management. */
        /* Set Extended packet length bit */
        err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

        udelay(40);

        return err;
}

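/* Return nonzero if the PHY is currently advertising every 10/100 (and,
 * unless the board is 10/100 only, gigabit) mode we expect it to.
 */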
static int tg3_copper_is_advertising_all(struct tg3 *tp)
{
        u32 adv_reg, all_mask;

        tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
        all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                    ADVERTISE_100HALF | ADVERTISE_100FULL);
        if ((adv_reg & all_mask) != all_mask)
                return 0;
        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                u32 tg3_ctrl;

                tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
                all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
                            MII_TG3_CTRL_ADV_1000_FULL);
                if ((tg3_ctrl & all_mask) != all_mask)
                        return 0;
        }
        return 1;
}

1402 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1403 {
1404         int current_link_up;
1405         u32 bmsr, dummy;
1406         u16 current_speed;
1407         u8 current_duplex;
1408         int i, err;
1409
1410         tw32(MAC_EVENT, 0);
1411
1412         tw32_f(MAC_STATUS,
1413              (MAC_STATUS_SYNC_CHANGED |
1414               MAC_STATUS_CFG_CHANGED |
1415               MAC_STATUS_MI_COMPLETION |
1416               MAC_STATUS_LNKSTATE_CHANGED));
1417         udelay(40);
1418
1419         tp->mi_mode = MAC_MI_MODE_BASE;
1420         tw32_f(MAC_MI_MODE, tp->mi_mode);
1421         udelay(80);
1422
1423         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1424
1425         /* Some third-party PHYs need to be reset on link going
1426          * down.
1427          */
1428         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1429              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1430              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1431             netif_carrier_ok(tp->dev)) {
1432                 tg3_readphy(tp, MII_BMSR, &bmsr);
1433                 tg3_readphy(tp, MII_BMSR, &bmsr);
1434                 if (!(bmsr & BMSR_LSTATUS))
1435                         force_reset = 1;
1436         }
1437         if (force_reset)
1438                 tg3_phy_reset(tp);
1439
1440         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1441                 tg3_readphy(tp, MII_BMSR, &bmsr);
1442                 tg3_readphy(tp, MII_BMSR, &bmsr);
1443
1444                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1445                         bmsr = 0;
1446
1447                 if (!(bmsr & BMSR_LSTATUS)) {
1448                         err = tg3_init_5401phy_dsp(tp);
1449                         if (err)
1450                                 return err;
1451
1452                         tg3_readphy(tp, MII_BMSR, &bmsr);
1453                         for (i = 0; i < 1000; i++) {
1454                                 udelay(10);
1455                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1456                                 if (bmsr & BMSR_LSTATUS) {
1457                                         udelay(40);
1458                                         break;
1459                                 }
1460                         }
1461
1462                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1463                             !(bmsr & BMSR_LSTATUS) &&
1464                             tp->link_config.active_speed == SPEED_1000) {
1465                                 err = tg3_phy_reset(tp);
1466                                 if (!err)
1467                                         err = tg3_init_5401phy_dsp(tp);
1468                                 if (err)
1469                                         return err;
1470                         }
1471                 }
1472         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1473                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1474                 /* 5701 {A0,B0} CRC bug workaround */
1475                 tg3_writephy(tp, 0x15, 0x0a75);
1476                 tg3_writephy(tp, 0x1c, 0x8c68);
1477                 tg3_writephy(tp, 0x1c, 0x8d68);
1478                 tg3_writephy(tp, 0x1c, 0x8c68);
1479         }
1480
1481         /* Clear pending interrupts... */
1482         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1483         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1484
1485         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1486                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1487         else
1488                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1489
1490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1492                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1493                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1494                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1495                 else
1496                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1497         }
1498
1499         current_link_up = 0;
1500         current_speed = SPEED_INVALID;
1501         current_duplex = DUPLEX_INVALID;
1502
1503         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1504                 u32 val;
1505
1506                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1507                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1508                 if (!(val & (1 << 10))) {
1509                         val |= (1 << 10);
1510                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1511                         goto relink;
1512                 }
1513         }
1514
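        /* The link status bit in BMSR is latched low, so the register is
         * read twice: the first read returns the latched value, the second
         * the current state.
         */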
1515         bmsr = 0;
1516         for (i = 0; i < 100; i++) {
1517                 tg3_readphy(tp, MII_BMSR, &bmsr);
1518                 tg3_readphy(tp, MII_BMSR, &bmsr);
1519                 if (bmsr & BMSR_LSTATUS)
1520                         break;
1521                 udelay(40);
1522         }
1523
1524         if (bmsr & BMSR_LSTATUS) {
1525                 u32 aux_stat, bmcr;
1526
1527                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1528                 for (i = 0; i < 2000; i++) {
1529                         udelay(10);
1530                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1531                         if (aux_stat)
1532                                 break;
1533                 }
1534
1535                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1536                                              &current_speed,
1537                                              &current_duplex);
1538
1539                 bmcr = 0;
1540                 for (i = 0; i < 200; i++) {
1541                         tg3_readphy(tp, MII_BMCR, &bmcr);
1542                         tg3_readphy(tp, MII_BMCR, &bmcr);
1543                         if (bmcr && bmcr != 0x7fff)
1544                                 break;
1545                         udelay(10);
1546                 }
1547
1548                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1549                         if (bmcr & BMCR_ANENABLE) {
1550                                 current_link_up = 1;
1551
1552                                 /* Force autoneg restart if we are exiting
1553                                  * low power mode.
1554                                  */
1555                                 if (!tg3_copper_is_advertising_all(tp))
1556                                         current_link_up = 0;
1557                         } else {
1558                                 current_link_up = 0;
1559                         }
1560                 } else {
1561                         if (!(bmcr & BMCR_ANENABLE) &&
1562                             tp->link_config.speed == current_speed &&
1563                             tp->link_config.duplex == current_duplex) {
1564                                 current_link_up = 1;
1565                         } else {
1566                                 current_link_up = 0;
1567                         }
1568                 }
1569
1570                 tp->link_config.active_speed = current_speed;
1571                 tp->link_config.active_duplex = current_duplex;
1572         }
1573
1574         if (current_link_up == 1 &&
1575             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1576             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1577                 u32 local_adv, remote_adv;
1578
1579                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1580                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1581
1582                 tg3_readphy(tp, MII_LPA, &remote_adv);
1583                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1584
1585                 /* If we are not advertising full pause capability,
1586                  * something is wrong.  Bring the link down and reconfigure.
1587                  */
1588                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1589                         current_link_up = 0;
1590                 } else {
1591                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1592                 }
1593         }
1594 relink:
1595         if (current_link_up == 0) {
1596                 u32 tmp;
1597
1598                 tg3_phy_copper_begin(tp);
1599
1600                 tg3_readphy(tp, MII_BMSR, &tmp);
1601                 tg3_readphy(tp, MII_BMSR, &tmp);
1602                 if (tmp & BMSR_LSTATUS)
1603                         current_link_up = 1;
1604         }
1605
1606         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1607         if (current_link_up == 1) {
1608                 if (tp->link_config.active_speed == SPEED_100 ||
1609                     tp->link_config.active_speed == SPEED_10)
1610                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1611                 else
1612                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1613         } else
1614                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1615
1616         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1617         if (tp->link_config.active_duplex == DUPLEX_HALF)
1618                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1619
1620         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1622                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1623                     (current_link_up == 1 &&
1624                      tp->link_config.active_speed == SPEED_10))
1625                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1626         } else {
1627                 if (current_link_up == 1)
1628                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1629         }
1630
1631         /* ??? Without this setting Netgear GA302T PHY does not
1632          * ??? send/receive packets...
1633          */
1634         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1635             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1636                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1637                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1638                 udelay(80);
1639         }
1640
1641         tw32_f(MAC_MODE, tp->mac_mode);
1642         udelay(40);
1643
1644         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1645                 /* Polled via timer. */
1646                 tw32_f(MAC_EVENT, 0);
1647         } else {
1648                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1649         }
1650         udelay(40);
1651
1652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1653             current_link_up == 1 &&
1654             tp->link_config.active_speed == SPEED_1000 &&
1655             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1656              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1657                 udelay(120);
1658                 tw32_f(MAC_STATUS,
1659                      (MAC_STATUS_SYNC_CHANGED |
1660                       MAC_STATUS_CFG_CHANGED));
1661                 udelay(40);
1662                 tg3_write_mem(tp,
1663                               NIC_SRAM_FIRMWARE_MBOX,
1664                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1665         }
1666
1667         if (current_link_up != netif_carrier_ok(tp->dev)) {
1668                 if (current_link_up)
1669                         netif_carrier_on(tp->dev);
1670                 else
1671                         netif_carrier_off(tp->dev);
1672                 tg3_link_report(tp);
1673         }
1674
1675         return 0;
1676 }
1677
1678 struct tg3_fiber_aneginfo {
1679         int state;
1680 #define ANEG_STATE_UNKNOWN              0
1681 #define ANEG_STATE_AN_ENABLE            1
1682 #define ANEG_STATE_RESTART_INIT         2
1683 #define ANEG_STATE_RESTART              3
1684 #define ANEG_STATE_DISABLE_LINK_OK      4
1685 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1686 #define ANEG_STATE_ABILITY_DETECT       6
1687 #define ANEG_STATE_ACK_DETECT_INIT      7
1688 #define ANEG_STATE_ACK_DETECT           8
1689 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1690 #define ANEG_STATE_COMPLETE_ACK         10
1691 #define ANEG_STATE_IDLE_DETECT_INIT     11
1692 #define ANEG_STATE_IDLE_DETECT          12
1693 #define ANEG_STATE_LINK_OK              13
1694 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1695 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1696
1697         u32 flags;
1698 #define MR_AN_ENABLE            0x00000001
1699 #define MR_RESTART_AN           0x00000002
1700 #define MR_AN_COMPLETE          0x00000004
1701 #define MR_PAGE_RX              0x00000008
1702 #define MR_NP_LOADED            0x00000010
1703 #define MR_TOGGLE_TX            0x00000020
1704 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1705 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1706 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1707 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1708 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1709 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1710 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1711 #define MR_TOGGLE_RX            0x00002000
1712 #define MR_NP_RX                0x00004000
1713
1714 #define MR_LINK_OK              0x80000000
1715
1716         unsigned long link_time, cur_time;
1717
1718         u32 ability_match_cfg;
1719         int ability_match_count;
1720
1721         char ability_match, idle_match, ack_match;
1722
1723         u32 txconfig, rxconfig;
1724 #define ANEG_CFG_NP             0x00000080
1725 #define ANEG_CFG_ACK            0x00000040
1726 #define ANEG_CFG_RF2            0x00000020
1727 #define ANEG_CFG_RF1            0x00000010
1728 #define ANEG_CFG_PS2            0x00000001
1729 #define ANEG_CFG_PS1            0x00008000
1730 #define ANEG_CFG_HD             0x00004000
1731 #define ANEG_CFG_FD             0x00002000
1732 #define ANEG_CFG_INVAL          0x00001f06
1733
1734 };
1735 #define ANEG_OK         0
1736 #define ANEG_DONE       1
1737 #define ANEG_TIMER_ENAB 2
1738 #define ANEG_FAILED     -1
1739
1740 #define ANEG_STATE_SETTLE_TIME  10000
1741
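/* One step of the software 1000BASE-X (clause 37 style) auto-negotiation
 * state machine, used when the hardware cannot run autoneg for us.  The
 * caller (fiber_autoneg below) drives it in roughly 1us steps until it
 * returns ANEG_DONE or ANEG_FAILED; ANEG_TIMER_ENAB simply means "not done
 * yet, keep polling".
 */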
1742 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1743                                    struct tg3_fiber_aneginfo *ap)
1744 {
1745         unsigned long delta;
1746         u32 rx_cfg_reg;
1747         int ret;
1748
1749         if (ap->state == ANEG_STATE_UNKNOWN) {
1750                 ap->rxconfig = 0;
1751                 ap->link_time = 0;
1752                 ap->cur_time = 0;
1753                 ap->ability_match_cfg = 0;
1754                 ap->ability_match_count = 0;
1755                 ap->ability_match = 0;
1756                 ap->idle_match = 0;
1757                 ap->ack_match = 0;
1758         }
1759         ap->cur_time++;
1760
1761         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1762                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1763
1764                 if (rx_cfg_reg != ap->ability_match_cfg) {
1765                         ap->ability_match_cfg = rx_cfg_reg;
1766                         ap->ability_match = 0;
1767                         ap->ability_match_count = 0;
1768                 } else {
1769                         if (++ap->ability_match_count > 1) {
1770                                 ap->ability_match = 1;
1771                                 ap->ability_match_cfg = rx_cfg_reg;
1772                         }
1773                 }
1774                 if (rx_cfg_reg & ANEG_CFG_ACK)
1775                         ap->ack_match = 1;
1776                 else
1777                         ap->ack_match = 0;
1778
1779                 ap->idle_match = 0;
1780         } else {
1781                 ap->idle_match = 1;
1782                 ap->ability_match_cfg = 0;
1783                 ap->ability_match_count = 0;
1784                 ap->ability_match = 0;
1785                 ap->ack_match = 0;
1786
1787                 rx_cfg_reg = 0;
1788         }
1789
1790         ap->rxconfig = rx_cfg_reg;
1791         ret = ANEG_OK;
1792
1793         switch (ap->state) {
1794         case ANEG_STATE_UNKNOWN:
1795                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1796                         ap->state = ANEG_STATE_AN_ENABLE;
1797
1798                 /* fallthru */
1799         case ANEG_STATE_AN_ENABLE:
1800                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1801                 if (ap->flags & MR_AN_ENABLE) {
1802                         ap->link_time = 0;
1803                         ap->cur_time = 0;
1804                         ap->ability_match_cfg = 0;
1805                         ap->ability_match_count = 0;
1806                         ap->ability_match = 0;
1807                         ap->idle_match = 0;
1808                         ap->ack_match = 0;
1809
1810                         ap->state = ANEG_STATE_RESTART_INIT;
1811                 } else {
1812                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1813                 }
1814                 break;
1815
1816         case ANEG_STATE_RESTART_INIT:
1817                 ap->link_time = ap->cur_time;
1818                 ap->flags &= ~(MR_NP_LOADED);
1819                 ap->txconfig = 0;
1820                 tw32(MAC_TX_AUTO_NEG, 0);
1821                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1822                 tw32_f(MAC_MODE, tp->mac_mode);
1823                 udelay(40);
1824
1825                 ret = ANEG_TIMER_ENAB;
1826                 ap->state = ANEG_STATE_RESTART;
1827
1828                 /* fallthru */
1829         case ANEG_STATE_RESTART:
1830                 delta = ap->cur_time - ap->link_time;
1831                 if (delta > ANEG_STATE_SETTLE_TIME) {
1832                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1833                 } else {
1834                         ret = ANEG_TIMER_ENAB;
1835                 }
1836                 break;
1837
1838         case ANEG_STATE_DISABLE_LINK_OK:
1839                 ret = ANEG_DONE;
1840                 break;
1841
1842         case ANEG_STATE_ABILITY_DETECT_INIT:
1843                 ap->flags &= ~(MR_TOGGLE_TX);
1844                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1845                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1846                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1847                 tw32_f(MAC_MODE, tp->mac_mode);
1848                 udelay(40);
1849
1850                 ap->state = ANEG_STATE_ABILITY_DETECT;
1851                 break;
1852
1853         case ANEG_STATE_ABILITY_DETECT:
1854                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1855                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1856                 }
1857                 break;
1858
1859         case ANEG_STATE_ACK_DETECT_INIT:
1860                 ap->txconfig |= ANEG_CFG_ACK;
1861                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1862                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1863                 tw32_f(MAC_MODE, tp->mac_mode);
1864                 udelay(40);
1865
1866                 ap->state = ANEG_STATE_ACK_DETECT;
1867
1868                 /* fallthru */
1869         case ANEG_STATE_ACK_DETECT:
1870                 if (ap->ack_match != 0) {
1871                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1872                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1873                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1874                         } else {
1875                                 ap->state = ANEG_STATE_AN_ENABLE;
1876                         }
1877                 } else if (ap->ability_match != 0 &&
1878                            ap->rxconfig == 0) {
1879                         ap->state = ANEG_STATE_AN_ENABLE;
1880                 }
1881                 break;
1882
1883         case ANEG_STATE_COMPLETE_ACK_INIT:
1884                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1885                         ret = ANEG_FAILED;
1886                         break;
1887                 }
1888                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1889                                MR_LP_ADV_HALF_DUPLEX |
1890                                MR_LP_ADV_SYM_PAUSE |
1891                                MR_LP_ADV_ASYM_PAUSE |
1892                                MR_LP_ADV_REMOTE_FAULT1 |
1893                                MR_LP_ADV_REMOTE_FAULT2 |
1894                                MR_LP_ADV_NEXT_PAGE |
1895                                MR_TOGGLE_RX |
1896                                MR_NP_RX);
1897                 if (ap->rxconfig & ANEG_CFG_FD)
1898                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1899                 if (ap->rxconfig & ANEG_CFG_HD)
1900                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1901                 if (ap->rxconfig & ANEG_CFG_PS1)
1902                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1903                 if (ap->rxconfig & ANEG_CFG_PS2)
1904                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1905                 if (ap->rxconfig & ANEG_CFG_RF1)
1906                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1907                 if (ap->rxconfig & ANEG_CFG_RF2)
1908                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1909                 if (ap->rxconfig & ANEG_CFG_NP)
1910                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1911
1912                 ap->link_time = ap->cur_time;
1913
1914                 ap->flags ^= (MR_TOGGLE_TX);
1915                 if (ap->rxconfig & 0x0008)
1916                         ap->flags |= MR_TOGGLE_RX;
1917                 if (ap->rxconfig & ANEG_CFG_NP)
1918                         ap->flags |= MR_NP_RX;
1919                 ap->flags |= MR_PAGE_RX;
1920
1921                 ap->state = ANEG_STATE_COMPLETE_ACK;
1922                 ret = ANEG_TIMER_ENAB;
1923                 break;
1924
1925         case ANEG_STATE_COMPLETE_ACK:
1926                 if (ap->ability_match != 0 &&
1927                     ap->rxconfig == 0) {
1928                         ap->state = ANEG_STATE_AN_ENABLE;
1929                         break;
1930                 }
1931                 delta = ap->cur_time - ap->link_time;
1932                 if (delta > ANEG_STATE_SETTLE_TIME) {
1933                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1934                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1935                         } else {
1936                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1937                                     !(ap->flags & MR_NP_RX)) {
1938                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1939                                 } else {
1940                                         ret = ANEG_FAILED;
1941                                 }
1942                         }
1943                 }
1944                 break;
1945
1946         case ANEG_STATE_IDLE_DETECT_INIT:
1947                 ap->link_time = ap->cur_time;
1948                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1949                 tw32_f(MAC_MODE, tp->mac_mode);
1950                 udelay(40);
1951
1952                 ap->state = ANEG_STATE_IDLE_DETECT;
1953                 ret = ANEG_TIMER_ENAB;
1954                 break;
1955
1956         case ANEG_STATE_IDLE_DETECT:
1957                 if (ap->ability_match != 0 &&
1958                     ap->rxconfig == 0) {
1959                         ap->state = ANEG_STATE_AN_ENABLE;
1960                         break;
1961                 }
1962                 delta = ap->cur_time - ap->link_time;
1963                 if (delta > ANEG_STATE_SETTLE_TIME) {
1964                         /* XXX another gem from the Broadcom driver :( */
1965                         ap->state = ANEG_STATE_LINK_OK;
1966                 }
1967                 break;
1968
1969         case ANEG_STATE_LINK_OK:
1970                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1971                 ret = ANEG_DONE;
1972                 break;
1973
1974         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1975                 /* ??? unimplemented */
1976                 break;
1977
1978         case ANEG_STATE_NEXT_PAGE_WAIT:
1979                 /* ??? unimplemented */
1980                 break;
1981
1982         default:
1983                 ret = ANEG_FAILED;
1984                 break;
1985         }
1986
1987         return ret;
1988 }
1989
1990 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
1991 {
1992         int res = 0;
1993         struct tg3_fiber_aneginfo aninfo;
1994         int status = ANEG_FAILED;
1995         unsigned int tick;
1996         u32 tmp;
1997
1998         tw32_f(MAC_TX_AUTO_NEG, 0);
1999
2000         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2001         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2002         udelay(40);
2003
2004         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2005         udelay(40);
2006
2007         memset(&aninfo, 0, sizeof(aninfo));
2008         aninfo.flags |= MR_AN_ENABLE;
2009         aninfo.state = ANEG_STATE_UNKNOWN;
2010         aninfo.cur_time = 0;
2011         tick = 0;
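        /* Step the state machine in ~1us increments for up to ~195ms. */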
2012         while (++tick < 195000) {
2013                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2014                 if (status == ANEG_DONE || status == ANEG_FAILED)
2015                         break;
2016
2017                 udelay(1);
2018         }
2019
2020         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2021         tw32_f(MAC_MODE, tp->mac_mode);
2022         udelay(40);
2023
2024         *flags = aninfo.flags;
2025
2026         if (status == ANEG_DONE &&
2027             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2028                              MR_LP_ADV_FULL_DUPLEX)))
2029                 res = 1;
2030
2031         return res;
2032 }
2033
2034 static void tg3_init_bcm8002(struct tg3 *tp)
2035 {
2036         u32 mac_status = tr32(MAC_STATUS);
2037         int i;
2038
2039         /* Reset when initializing for the first time or when we have a link. */
2040         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2041             !(mac_status & MAC_STATUS_PCS_SYNCED))
2042                 return;
2043
2044         /* Set PLL lock range. */
2045         tg3_writephy(tp, 0x16, 0x8007);
2046
2047         /* SW reset */
2048         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2049
2050         /* Wait for reset to complete. */
2051         /* XXX schedule_timeout() ... */
2052         for (i = 0; i < 500; i++)
2053                 udelay(10);
2054
2055         /* Config mode; select PMA/Ch 1 regs. */
2056         tg3_writephy(tp, 0x10, 0x8411);
2057
2058         /* Enable auto-lock and comdet, select txclk for tx. */
2059         tg3_writephy(tp, 0x11, 0x0a10);
2060
2061         tg3_writephy(tp, 0x18, 0x00a0);
2062         tg3_writephy(tp, 0x16, 0x41ff);
2063
2064         /* Assert and deassert POR. */
2065         tg3_writephy(tp, 0x13, 0x0400);
2066         udelay(40);
2067         tg3_writephy(tp, 0x13, 0x0000);
2068
2069         tg3_writephy(tp, 0x11, 0x0a50);
2070         udelay(40);
2071         tg3_writephy(tp, 0x11, 0x0a10);
2072
2073         /* Wait for signal to stabilize */
2074         /* XXX schedule_timeout() ... */
2075         for (i = 0; i < 15000; i++)
2076                 udelay(10);
2077
2078         /* Deselect the channel register so we can read the PHYID
2079          * later.
2080          */
2081         tg3_writephy(tp, 0x10, 0x8011);
2082 }
2083
2084 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2085 {
2086         u32 sg_dig_ctrl, sg_dig_status;
2087         u32 serdes_cfg, expected_sg_dig_ctrl;
2088         int workaround, port_a;
2089         int current_link_up;
2090
2091         serdes_cfg = 0;
2092         expected_sg_dig_ctrl = 0;
2093         workaround = 0;
2094         port_a = 1;
2095         current_link_up = 0;
2096
2097         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2098             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2099                 workaround = 1;
2100                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2101                         port_a = 0;
2102
2103                 serdes_cfg = tr32(MAC_SERDES_CFG) &
2104                         ((1 << 23) | (1 << 22) | (1 << 21) | (1 << 20));
2105         }
2106
2107         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2108
2109         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2110                 if (sg_dig_ctrl & (1 << 31)) {
2111                         if (workaround) {
2112                                 u32 val = serdes_cfg;
2113
2114                                 if (port_a)
2115                                         val |= 0xc010880;
2116                                 else
2117                                         val |= 0x4010880;
2118                                 tw32_f(MAC_SERDES_CFG, val);
2119                         }
2120                         tw32_f(SG_DIG_CTRL, 0x01388400);
2121                 }
2122                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2123                         tg3_setup_flow_control(tp, 0, 0);
2124                         current_link_up = 1;
2125                 }
2126                 goto out;
2127         }
2128
2129         /* Want auto-negotiation.  */
2130         expected_sg_dig_ctrl = 0x81388400;
2131
2132         /* Pause capability */
2133         expected_sg_dig_ctrl |= (1 << 11);
2134
2135         /* Asymmetric pause */
2136         expected_sg_dig_ctrl |= (1 << 12);
2137
2138         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2139                 if (workaround)
2140                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011880);
2141                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2142                 udelay(5);
2143                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2144
2145                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2146         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2147                                  MAC_STATUS_SIGNAL_DET)) {
2148                 int i;
2149
2150                 /* Give time to negotiate (~200ms) */
2151                 for (i = 0; i < 40000; i++) {
2152                         sg_dig_status = tr32(SG_DIG_STATUS);
2153                         if (sg_dig_status & (0x3))
2154                                 break;
2155                         udelay(5);
2156                 }
2157                 mac_status = tr32(MAC_STATUS);
2158
2159                 if ((sg_dig_status & (1 << 1)) &&
2160                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2161                         u32 local_adv, remote_adv;
2162
2163                         local_adv = ADVERTISE_PAUSE_CAP;
2164                         remote_adv = 0;
2165                         if (sg_dig_status & (1 << 19))
2166                                 remote_adv |= LPA_PAUSE_CAP;
2167                         if (sg_dig_status & (1 << 20))
2168                                 remote_adv |= LPA_PAUSE_ASYM;
2169
2170                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2171                         current_link_up = 1;
2172                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2173                 } else if (!(sg_dig_status & (1 << 1))) {
2174                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2175                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2176                         else {
2177                                 if (workaround) {
2178                                         u32 val = serdes_cfg;
2179
2180                                         if (port_a)
2181                                                 val |= 0xc010880;
2182                                         else
2183                                                 val |= 0x4010880;
2184
2185                                         tw32_f(MAC_SERDES_CFG, val);
2186                                 }
2187
2188                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2189                                 udelay(40);
2190
2191                                 mac_status = tr32(MAC_STATUS);
2192                                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2193                                         tg3_setup_flow_control(tp, 0, 0);
2194                                         current_link_up = 1;
2195                                 }
2196                         }
2197                 }
2198         }
2199
2200 out:
2201         return current_link_up;
2202 }
2203
2204 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2205 {
2206         int current_link_up = 0;
2207
2208         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2209                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2210                 goto out;
2211         }
2212
2213         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2214                 u32 flags;
2215                 int i;
2216   
2217                 if (fiber_autoneg(tp, &flags)) {
2218                         u32 local_adv, remote_adv;
2219
2220                         local_adv = ADVERTISE_PAUSE_CAP;
2221                         remote_adv = 0;
2222                         if (flags & MR_LP_ADV_SYM_PAUSE)
2223                                 remote_adv |= LPA_PAUSE_CAP;
2224                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2225                                 remote_adv |= LPA_PAUSE_ASYM;
2226
2227                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2228
2229                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2230                         current_link_up = 1;
2231                 }
2232                 for (i = 0; i < 30; i++) {
2233                         udelay(20);
2234                         tw32_f(MAC_STATUS,
2235                                (MAC_STATUS_SYNC_CHANGED |
2236                                 MAC_STATUS_CFG_CHANGED));
2237                         udelay(40);
2238                         if ((tr32(MAC_STATUS) &
2239                              (MAC_STATUS_SYNC_CHANGED |
2240                               MAC_STATUS_CFG_CHANGED)) == 0)
2241                                 break;
2242                 }
2243
2244                 mac_status = tr32(MAC_STATUS);
2245                 if (current_link_up == 0 &&
2246                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2247                     !(mac_status & MAC_STATUS_RCVD_CFG))
2248                         current_link_up = 1;
2249         } else {
2250                 /* Forcing 1000FD link up. */
2251                 current_link_up = 1;
2252                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2253
2254                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2255                 udelay(40);
2256         }
2257
2258 out:
2259         return current_link_up;
2260 }
2261
2262 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2263 {
2264         u32 orig_pause_cfg;
2265         u16 orig_active_speed;
2266         u8 orig_active_duplex;
2267         u32 mac_status;
2268         int current_link_up;
2269         int i;
2270
2271         orig_pause_cfg =
2272                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2273                                   TG3_FLAG_TX_PAUSE));
2274         orig_active_speed = tp->link_config.active_speed;
2275         orig_active_duplex = tp->link_config.active_duplex;
2276
2277         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2278             netif_carrier_ok(tp->dev) &&
2279             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2280                 mac_status = tr32(MAC_STATUS);
2281                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2282                                MAC_STATUS_SIGNAL_DET |
2283                                MAC_STATUS_CFG_CHANGED |
2284                                MAC_STATUS_RCVD_CFG);
2285                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2286                                    MAC_STATUS_SIGNAL_DET)) {
2287                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2288                                             MAC_STATUS_CFG_CHANGED));
2289                         return 0;
2290                 }
2291         }
2292
2293         tw32_f(MAC_TX_AUTO_NEG, 0);
2294
2295         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2296         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2297         tw32_f(MAC_MODE, tp->mac_mode);
2298         udelay(40);
2299
2300         if (tp->phy_id == PHY_ID_BCM8002)
2301                 tg3_init_bcm8002(tp);
2302
2303         /* Enable link change event even when serdes polling.  */
2304         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2305         udelay(40);
2306
2307         current_link_up = 0;
2308         mac_status = tr32(MAC_STATUS);
2309
2310         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2311                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2312         else
2313                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2314
2315         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2316         tw32_f(MAC_MODE, tp->mac_mode);
2317         udelay(40);
2318
2319         tp->hw_status->status =
2320                 (SD_STATUS_UPDATED |
2321                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2322
2323         for (i = 0; i < 100; i++) {
2324                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2325                                     MAC_STATUS_CFG_CHANGED));
2326                 udelay(5);
2327                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2328                                          MAC_STATUS_CFG_CHANGED)) == 0)
2329                         break;
2330         }
2331
2332         mac_status = tr32(MAC_STATUS);
2333         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2334                 current_link_up = 0;
2335                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2336                         tw32_f(MAC_MODE, (tp->mac_mode |
2337                                           MAC_MODE_SEND_CONFIGS));
2338                         udelay(1);
2339                         tw32_f(MAC_MODE, tp->mac_mode);
2340                 }
2341         }
2342
2343         if (current_link_up == 1) {
2344                 tp->link_config.active_speed = SPEED_1000;
2345                 tp->link_config.active_duplex = DUPLEX_FULL;
2346                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2347                                     LED_CTRL_LNKLED_OVERRIDE |
2348                                     LED_CTRL_1000MBPS_ON));
2349         } else {
2350                 tp->link_config.active_speed = SPEED_INVALID;
2351                 tp->link_config.active_duplex = DUPLEX_INVALID;
2352                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2353                                     LED_CTRL_LNKLED_OVERRIDE |
2354                                     LED_CTRL_TRAFFIC_OVERRIDE));
2355         }
2356
2357         if (current_link_up != netif_carrier_ok(tp->dev)) {
2358                 if (current_link_up)
2359                         netif_carrier_on(tp->dev);
2360                 else
2361                         netif_carrier_off(tp->dev);
2362                 tg3_link_report(tp);
2363         } else {
2364                 u32 now_pause_cfg =
2365                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2366                                          TG3_FLAG_TX_PAUSE);
2367                 if (orig_pause_cfg != now_pause_cfg ||
2368                     orig_active_speed != tp->link_config.active_speed ||
2369                     orig_active_duplex != tp->link_config.active_duplex)
2370                         tg3_link_report(tp);
2371         }
2372
2373         return 0;
2374 }
2375
2376 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2377 {
2378         int err;
2379
2380         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2381                 err = tg3_setup_fiber_phy(tp, force_reset);
2382         } else {
2383                 err = tg3_setup_copper_phy(tp, force_reset);
2384         }
2385
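        /* 1000 Mb/s half duplex presumably needs the extended slot time
         * required for carrier extension; every other speed/duplex setting
         * uses the normal value.
         */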
2386         if (tp->link_config.active_speed == SPEED_1000 &&
2387             tp->link_config.active_duplex == DUPLEX_HALF)
2388                 tw32(MAC_TX_LENGTHS,
2389                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2390                       (6 << TX_LENGTHS_IPG_SHIFT) |
2391                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2392         else
2393                 tw32(MAC_TX_LENGTHS,
2394                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2395                       (6 << TX_LENGTHS_IPG_SHIFT) |
2396                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2397
2398         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2399             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2400                 if (netif_carrier_ok(tp->dev)) {
2401                         tw32(HOSTCC_STAT_COAL_TICKS,
2402                              DEFAULT_STAT_COAL_TICKS);
2403                 } else {
2404                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2405                 }
2406         }
2407
2408         return err;
2409 }
2410
2411 /* Tigon3 never reports partial packet sends.  So we do not
2412  * need special logic to handle SKBs that have not had all
2413  * of their frags sent yet, like SunGEM does.
2414  */
2415 static void tg3_tx(struct tg3 *tp)
2416 {
2417         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2418         u32 sw_idx = tp->tx_cons;
2419
2420         while (sw_idx != hw_idx) {
2421                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2422                 struct sk_buff *skb = ri->skb;
2423                 int i;
2424
2425                 if (unlikely(skb == NULL))
2426                         BUG();
2427
2428                 pci_unmap_single(tp->pdev,
2429                                  pci_unmap_addr(ri, mapping),
2430                                  skb_headlen(skb),
2431                                  PCI_DMA_TODEVICE);
2432
2433                 ri->skb = NULL;
2434
2435                 sw_idx = NEXT_TX(sw_idx);
2436
2437                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2438                         if (unlikely(sw_idx == hw_idx))
2439                                 BUG();
2440
2441                         ri = &tp->tx_buffers[sw_idx];
2442                         if (unlikely(ri->skb != NULL))
2443                                 BUG();
2444
2445                         pci_unmap_page(tp->pdev,
2446                                        pci_unmap_addr(ri, mapping),
2447                                        skb_shinfo(skb)->frags[i].size,
2448                                        PCI_DMA_TODEVICE);
2449
2450                         sw_idx = NEXT_TX(sw_idx);
2451                 }
2452
2453                 dev_kfree_skb_irq(skb);
2454         }
2455
2456         tp->tx_cons = sw_idx;
2457
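        /* Only wake the queue once a comfortable number of descriptors is
         * free again, so we do not ping-pong between stopped and running
         * on every completion.
         */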
2458         if (netif_queue_stopped(tp->dev) &&
2459             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2460                 netif_wake_queue(tp->dev);
2461 }
2462
2463 /* Returns size of skb allocated or < 0 on error.
2464  *
2465  * We only need to fill in the address because the other members
2466  * of the RX descriptor are invariant, see tg3_init_rings.
2467  *
2468  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2469  * posting buffers we only dirty the first cache line of the RX
2470  * descriptor (containing the address).  Whereas for the RX status
2471  * buffers the cpu only reads the last cacheline of the RX descriptor
2472  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2473  */
2474 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2475                             int src_idx, u32 dest_idx_unmasked)
2476 {
2477         struct tg3_rx_buffer_desc *desc;
2478         struct ring_info *map, *src_map;
2479         struct sk_buff *skb;
2480         dma_addr_t mapping;
2481         int skb_size, dest_idx;
2482
2483         src_map = NULL;
2484         switch (opaque_key) {
2485         case RXD_OPAQUE_RING_STD:
2486                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2487                 desc = &tp->rx_std[dest_idx];
2488                 map = &tp->rx_std_buffers[dest_idx];
2489                 if (src_idx >= 0)
2490                         src_map = &tp->rx_std_buffers[src_idx];
2491                 skb_size = RX_PKT_BUF_SZ;
2492                 break;
2493
2494         case RXD_OPAQUE_RING_JUMBO:
2495                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2496                 desc = &tp->rx_jumbo[dest_idx];
2497                 map = &tp->rx_jumbo_buffers[dest_idx];
2498                 if (src_idx >= 0)
2499                         src_map = &tp->rx_jumbo_buffers[src_idx];
2500                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2501                 break;
2502
2503         default:
2504                 return -EINVAL;
2505         }
2506
2507         /* Do not overwrite any of the map or rp information
2508          * until we are sure we can commit to a new buffer.
2509          *
2510          * Callers depend upon this behavior and assume that
2511          * we leave everything unchanged if we fail.
2512          */
2513         skb = dev_alloc_skb(skb_size);
2514         if (skb == NULL)
2515                 return -ENOMEM;
2516
2517         skb->dev = tp->dev;
2518         skb_reserve(skb, tp->rx_offset);
2519
2520         mapping = pci_map_single(tp->pdev, skb->data,
2521                                  skb_size - tp->rx_offset,
2522                                  PCI_DMA_FROMDEVICE);
2523
2524         map->skb = skb;
2525         pci_unmap_addr_set(map, mapping, mapping);
2526
2527         if (src_map != NULL)
2528                 src_map->skb = NULL;
2529
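        /* The descriptor carries the 64-bit DMA address as two 32-bit
         * words; everything else was set up once in tg3_init_rings.
         */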
2530         desc->addr_hi = ((u64)mapping >> 32);
2531         desc->addr_lo = ((u64)mapping & 0xffffffff);
2532
2533         return skb_size;
2534 }
2535
2536 /* We only need to move over in the address because the other
2537  * members of the RX descriptor are invariant.  See notes above
2538  * tg3_alloc_rx_skb for full details.
2539  */
2540 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2541                            int src_idx, u32 dest_idx_unmasked)
2542 {
2543         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2544         struct ring_info *src_map, *dest_map;
2545         int dest_idx;
2546
2547         switch (opaque_key) {
2548         case RXD_OPAQUE_RING_STD:
2549                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2550                 dest_desc = &tp->rx_std[dest_idx];
2551                 dest_map = &tp->rx_std_buffers[dest_idx];
2552                 src_desc = &tp->rx_std[src_idx];
2553                 src_map = &tp->rx_std_buffers[src_idx];
2554                 break;
2555
2556         case RXD_OPAQUE_RING_JUMBO:
2557                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2558                 dest_desc = &tp->rx_jumbo[dest_idx];
2559                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2560                 src_desc = &tp->rx_jumbo[src_idx];
2561                 src_map = &tp->rx_jumbo_buffers[src_idx];
2562                 break;
2563
2564         default:
2565                 return;
2566         }
2567
2568         dest_map->skb = src_map->skb;
2569         pci_unmap_addr_set(dest_map, mapping,
2570                            pci_unmap_addr(src_map, mapping));
2571         dest_desc->addr_hi = src_desc->addr_hi;
2572         dest_desc->addr_lo = src_desc->addr_lo;
2573
2574         src_map->skb = NULL;
2575 }
2576
2577 #if TG3_VLAN_TAG_USED
2578 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2579 {
2580         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2581 }
2582 #endif
2583
2584 /* The RX ring scheme is composed of multiple rings which post fresh
2585  * buffers to the chip, and one special ring the chip uses to report
2586  * status back to the host.
2587  *
2588  * The special ring reports the status of received packets to the
2589  * host.  The chip does not write into the original descriptor the
2590  * RX buffer was obtained from.  The chip simply takes the original
2591  * descriptor as provided by the host, updates the status and length
2592  * fields, then writes this into the next status ring entry.
2593  *
2594  * Each ring the host uses to post buffers to the chip is described
2595  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2596  * it is first placed into the on-chip ram.  When the packet's length
2597  * is known, it walks down the TG3_BDINFO entries to select the ring.
2598  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2599  * whose MAXLEN covers the new packet's length is chosen.
2600  *
2601  * The "separate ring for rx status" scheme may sound queer, but it makes
2602  * sense from a cache coherency perspective.  If only the host writes
2603  * to the buffer post rings, and only the chip writes to the rx status
2604  * rings, then cache lines never move beyond shared-modified state.
2605  * If both the host and chip were to write into the same ring, cache line
2606  * eviction could occur since both entities want it in an exclusive state.
2607  */
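/* Concretely: the host posts empty buffers on the standard (and, for large
 * MTU, jumbo) producer rings and bumps the matching producer mailboxes, then
 * learns about completed packets by walking the RX return ring and acking
 * its consumer mailbox -- which is what tg3_rx() below does.
 */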
2608 static int tg3_rx(struct tg3 *tp, int budget)
2609 {
2610         u32 work_mask;
2611         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2612         u16 hw_idx, sw_idx;
2613         int received;
2614
2615         hw_idx = tp->hw_status->idx[0].rx_producer;
2616         /*
2617          * We need to order the read of hw_idx and the read of
2618          * the opaque cookie.
2619          */
2620         rmb();
2621         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2622         work_mask = 0;
2623         received = 0;
2624         while (sw_idx != hw_idx && budget > 0) {
2625                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2626                 unsigned int len;
2627                 struct sk_buff *skb;
2628                 dma_addr_t dma_addr;
2629                 u32 opaque_key, desc_idx, *post_ptr;
2630
2631                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2632                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2633                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2634                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2635                                                   mapping);
2636                         skb = tp->rx_std_buffers[desc_idx].skb;
2637                         post_ptr = &tp->rx_std_ptr;
2638                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2639                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2640                                                   mapping);
2641                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2642                         post_ptr = &tp->rx_jumbo_ptr;
2643                 }
2644                 else {
2645                         goto next_pkt_nopost;
2646                 }
2647
2648                 work_mask |= opaque_key;
2649
2650                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2651                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2652                 drop_it:
2653                         tg3_recycle_rx(tp, opaque_key,
2654                                        desc_idx, *post_ptr);
2655                 drop_it_no_recycle:
2656                         /* Other statistics are kept track of by the card. */
2657                         tp->net_stats.rx_dropped++;
2658                         goto next_pkt;
2659                 }
2660
2661                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2662
2663                 if (len > RX_COPY_THRESHOLD) {
2664                         int skb_size;
2665
2666                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2667                                                     desc_idx, *post_ptr);
2668                         if (skb_size < 0)
2669                                 goto drop_it;
2670
2671                         pci_unmap_single(tp->pdev, dma_addr,
2672                                          skb_size - tp->rx_offset,
2673                                          PCI_DMA_FROMDEVICE);
2674
2675                         skb_put(skb, len);
2676                 } else {
2677                         struct sk_buff *copy_skb;
2678
2679                         tg3_recycle_rx(tp, opaque_key,
2680                                        desc_idx, *post_ptr);
2681
2682                         copy_skb = dev_alloc_skb(len + 2);
2683                         if (copy_skb == NULL)
2684                                 goto drop_it_no_recycle;
2685
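                        /* Reserve 2 bytes so the IP header ends up
                         * word-aligned behind the 14-byte Ethernet header.
                         */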
2686                         copy_skb->dev = tp->dev;
2687                         skb_reserve(copy_skb, 2);
2688                         skb_put(copy_skb, len);
2689                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2690                         memcpy(copy_skb->data, skb->data, len);
2691                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2692
2693                         /* We'll reuse the original ring buffer. */
2694                         skb = copy_skb;
2695                 }
2696
2697                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2698                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2699                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2700                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2701                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2702                 else
2703                         skb->ip_summed = CHECKSUM_NONE;
2704
2705                 skb->protocol = eth_type_trans(skb, tp->dev);
2706 #if TG3_VLAN_TAG_USED
2707                 if (tp->vlgrp != NULL &&
2708                     desc->type_flags & RXD_FLAG_VLAN) {
2709                         tg3_vlan_rx(tp, skb,
2710                                     desc->err_vlan & RXD_VLAN_MASK);
2711                 } else
2712 #endif
2713                         netif_receive_skb(skb);
2714
2715                 tp->dev->last_rx = jiffies;
2716                 received++;
2717                 budget--;
2718
2719 next_pkt:
2720                 (*post_ptr)++;
2721 next_pkt_nopost:
2722                 rx_rcb_ptr++;
2723                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2724         }
2725
2726         /* ACK the status ring. */
2727         tp->rx_rcb_ptr = rx_rcb_ptr;
2728         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2729                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2730
2731         /* Refill RX ring(s). */
2732         if (work_mask & RXD_OPAQUE_RING_STD) {
2733                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2734                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2735                              sw_idx);
2736         }
2737         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2738                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2739                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2740                              sw_idx);
2741         }
2742
2743         return received;
2744 }
2745
2746 static int tg3_poll(struct net_device *netdev, int *budget)
2747 {
2748         struct tg3 *tp = netdev_priv(netdev);
2749         struct tg3_hw_status *sblk = tp->hw_status;
2750         unsigned long flags;
2751         int done;
2752
2753         spin_lock_irqsave(&tp->lock, flags);
2754
2755         /* handle link change and other phy events */
2756         if (!(tp->tg3_flags &
2757               (TG3_FLAG_USE_LINKCHG_REG |
2758                TG3_FLAG_POLL_SERDES))) {
2759                 if (sblk->status & SD_STATUS_LINK_CHG) {
2760                         sblk->status = SD_STATUS_UPDATED |
2761                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2762                         tg3_setup_phy(tp, 0);
2763                 }
2764         }
2765
2766         /* run TX completion thread */
2767         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2768                 spin_lock(&tp->tx_lock);
2769                 tg3_tx(tp);
2770                 spin_unlock(&tp->tx_lock);
2771         }
2772
2773         spin_unlock_irqrestore(&tp->lock, flags);
2774
2775         /* run RX thread, within the bounds set by NAPI.
2776          * All RX "locking" is done by ensuring outside
2777          * code synchronizes with dev->poll()
2778          */
2779         done = 1;
2780         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2781                 int orig_budget = *budget;
2782                 int work_done;
2783
2784                 if (orig_budget > netdev->quota)
2785                         orig_budget = netdev->quota;
2786
2787                 work_done = tg3_rx(tp, orig_budget);
2788
2789                 *budget -= work_done;
2790                 netdev->quota -= work_done;
2791
2792                 if (work_done >= orig_budget)
2793                         done = 0;
2794         }
2795
2796         /* if no more work, tell net stack and NIC we're done */
2797         if (done) {
2798                 spin_lock_irqsave(&tp->lock, flags);
2799                 __netif_rx_complete(netdev);
2800                 tg3_enable_ints(tp);
2801                 spin_unlock_irqrestore(&tp->lock, flags);
2802         }
2803
2804         return (done ? 0 : 1);
2805 }
2806
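/* Check the status block for pending work (link change, TX completions,
 * or newly received packets) without touching any hardware registers.
 */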
2807 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2808 {
2809         struct tg3_hw_status *sblk = tp->hw_status;
2810         unsigned int work_exists = 0;
2811
2812         /* check for phy events */
2813         if (!(tp->tg3_flags &
2814               (TG3_FLAG_USE_LINKCHG_REG |
2815                TG3_FLAG_POLL_SERDES))) {
2816                 if (sblk->status & SD_STATUS_LINK_CHG)
2817                         work_exists = 1;
2818         }
2819         /* check for RX/TX work to do */
2820         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2821             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2822                 work_exists = 1;
2823
2824         return work_exists;
2825 }
2826
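/* Interrupt handler.  Acks the interrupt through the mailbox register,
 * then schedules the NAPI poll if the status block shows pending work;
 * otherwise it re-enables interrupts (the IRQ line may be shared).
 */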
2827 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2828 {
2829         struct net_device *dev = dev_id;
2830         struct tg3 *tp = netdev_priv(dev);
2831         struct tg3_hw_status *sblk = tp->hw_status;
2832         unsigned long flags;
2833         unsigned int handled = 1;
2834
2835         spin_lock_irqsave(&tp->lock, flags);
2836
2837         if (sblk->status & SD_STATUS_UPDATED) {
2838                 /*
2839                  * writing any value to intr-mbox-0 clears PCI INTA# and
2840                  * chip-internal interrupt pending events.
2841                  * writing a non-zero value to intr-mbox-0 additionally
2842                  * tells the NIC to stop sending us irqs, engaging
2843                  * "in-intr-handler" event coalescing.
2844                  */
2845                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2846                              0x00000001);
2847                 /*
2848                  * Flush PCI write.  This also guarantees that our
2849                  * status block has been flushed to host memory.
2850                  */
2851                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2852                 sblk->status &= ~SD_STATUS_UPDATED;
2853
2854                 if (likely(tg3_has_work(dev, tp)))
2855                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2856                 else {
2857                         /* no work, shared interrupt perhaps?  re-enable
2858                          * interrupts, and flush that PCI write
2859                          */
2860                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2861                                 0x00000000);
2862                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2863                 }
2864         } else {        /* shared interrupt */
2865                 handled = 0;
2866         }
2867
2868         spin_unlock_irqrestore(&tp->lock, flags);
2869
2870         return IRQ_RETVAL(handled);
2871 }
2872
2873 static int tg3_init_hw(struct tg3 *);
2874 static int tg3_halt(struct tg3 *);
2875
2876 #ifdef CONFIG_NET_POLL_CONTROLLER
2877 static void tg3_poll_controller(struct net_device *dev)
2878 {
2879         tg3_interrupt(dev->irq, dev, NULL);
2880 }
2881 #endif
2882
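/* Work queue handler scheduled from tg3_tx_timeout(): halt and
 * re-initialize the chip, then restart the interface.
 */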
2883 static void tg3_reset_task(void *_data)
2884 {
2885         struct tg3 *tp = _data;
2886         unsigned int restart_timer;
2887
2888         tg3_netif_stop(tp);
2889
2890         spin_lock_irq(&tp->lock);
2891         spin_lock(&tp->tx_lock);
2892
2893         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2894         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2895
2896         tg3_halt(tp);
2897         tg3_init_hw(tp);
2898
2899         tg3_netif_start(tp);
2900
2901         spin_unlock(&tp->tx_lock);
2902         spin_unlock_irq(&tp->lock);
2903
2904         if (restart_timer)
2905                 mod_timer(&tp->timer, jiffies + 1);
2906 }
2907
2908 static void tg3_tx_timeout(struct net_device *dev)
2909 {
2910         struct tg3 *tp = netdev_priv(dev);
2911
2912         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2913                dev->name);
2914
2915         schedule_work(&tp->reset_task);
2916 }
2917
2918 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2919
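/* Work around the 4GB-boundary DMA hardware bug: copy the whole skb into
 * a freshly allocated linear buffer, queue that as a single descriptor,
 * and unmap and free the original skb and its fragments.
 */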
2920 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2921                                        u32 guilty_entry, int guilty_len,
2922                                        u32 last_plus_one, u32 *start, u32 mss)
2923 {
2924         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2925         dma_addr_t new_addr;
2926         u32 entry = *start;
2927         int i;
2928
2929         if (!new_skb) {
2930                 dev_kfree_skb(skb);
2931                 return -1;
2932         }
2933
2934         /* New SKB is guaranteed to be linear. */
2935         entry = *start;
2936         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2937                                   PCI_DMA_TODEVICE);
2938         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2939                     (skb->ip_summed == CHECKSUM_HW) ?
2940                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2941         *start = NEXT_TX(entry);
2942
2943         /* Now clean up the sw ring entries. */
2944         i = 0;
2945         while (entry != last_plus_one) {
2946                 int len;
2947
2948                 if (i == 0)
2949                         len = skb_headlen(skb);
2950                 else
2951                         len = skb_shinfo(skb)->frags[i-1].size;
2952                 pci_unmap_single(tp->pdev,
2953                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2954                                  len, PCI_DMA_TODEVICE);
2955                 if (i == 0) {
2956                         tp->tx_buffers[entry].skb = new_skb;
2957                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2958                 } else {
2959                         tp->tx_buffers[entry].skb = NULL;
2960                 }
2961                 entry = NEXT_TX(entry);
2962         }
2963
2964         dev_kfree_skb(skb);
2965
2966         return 0;
2967 }
2968
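/* Fill in one TX descriptor: DMA address, length/flags word, and the
 * combined MSS/VLAN tag field.  The low bit of mss_and_is_end marks the
 * final descriptor of the packet.
 */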
2969 static void tg3_set_txd(struct tg3 *tp, int entry,
2970                         dma_addr_t mapping, int len, u32 flags,
2971                         u32 mss_and_is_end)
2972 {
2973         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2974         int is_end = (mss_and_is_end & 0x1);
2975         u32 mss = (mss_and_is_end >> 1);
2976         u32 vlan_tag = 0;
2977
2978         if (is_end)
2979                 flags |= TXD_FLAG_END;
2980         if (flags & TXD_FLAG_VLAN) {
2981                 vlan_tag = flags >> 16;
2982                 flags &= 0xffff;
2983         }
2984         vlan_tag |= (mss << TXD_MSS_SHIFT);
2985
2986         txd->addr_hi = ((u64) mapping >> 32);
2987         txd->addr_lo = ((u64) mapping & 0xffffffff);
2988         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2989         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2990 }
2991
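/* Return true if a 'len'-byte buffer at 'mapping' would wrap the low
 * 32 bits of the DMA address, i.e. straddle a 4GB boundary (see
 * tigon3_4gb_hwbug_workaround above).
 */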
2992 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2993 {
2994         u32 base = (u32) mapping & 0xffffffff;
2995
2996         return ((base > 0xffffdcc0) &&
2997                 (base + len + 8 < base));
2998 }
2999
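/* hard_start_xmit handler.  Maps the skb head and page fragments for DMA,
 * builds TX descriptors with any checksum/TSO/VLAN flags, applies the
 * 4GB-boundary workaround when needed, and rings the TX producer mailbox.
 */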
3000 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3001 {
3002         struct tg3 *tp = netdev_priv(dev);
3003         dma_addr_t mapping;
3004         unsigned int i;
3005         u32 len, entry, base_flags, mss;
3006         int would_hit_hwbug;
3007         unsigned long flags;
3008
3009         len = skb_headlen(skb);
3010
3011         /* No BH disabling for tx_lock here.  We are running in BH disabled
3012          * context and TX reclaim runs via tp->poll inside of a software
3013          * interrupt.  Rejoice!
3014          *
3015          * Actually, things are not so simple.  If we take a hw
3016          * IRQ here, we can deadlock.  Consider:
3017          *
3018          *       CPU1           CPU2
3019          *   tg3_start_xmit
3020          *   take tp->tx_lock
3021          *                      tg3_timer
3022          *                      take tp->lock
3023          *   tg3_interrupt
3024          *   spin on tp->lock
3025          *                      spin on tp->tx_lock
3026          *
3027          * So we really do need to disable interrupts when taking
3028          * tx_lock here.
3029          */
3030         local_irq_save(flags);
3031         if (!spin_trylock(&tp->tx_lock)) { 
3032                 local_irq_restore(flags);
3033                 return NETDEV_TX_LOCKED; 
3034         } 
3035
3036         /* This is a hard error, log it. */
3037         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3038                 netif_stop_queue(dev);
3039                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3040                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3041                        dev->name);
3042                 return NETDEV_TX_BUSY;
3043         }
3044
3045         entry = tp->tx_prod;
3046         base_flags = 0;
3047         if (skb->ip_summed == CHECKSUM_HW)
3048                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3049 #if TG3_TSO_SUPPORT != 0
3050         mss = 0;
3051         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3052             (mss = skb_shinfo(skb)->tso_size) != 0) {
3053                 int tcp_opt_len, ip_tcp_len;
3054
3055                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3056                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3057
3058                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3059                                TXD_FLAG_CPU_POST_DMA);
3060
3061                 skb->nh.iph->check = 0;
3062                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3063                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
3064                                                       skb->nh.iph->daddr,
3065                                                       0, IPPROTO_TCP, 0);
3066
3067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3068                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3069                                 int tsflags;
3070
3071                                 tsflags = ((skb->nh.iph->ihl - 5) +
3072                                            (tcp_opt_len >> 2));
3073                                 mss |= (tsflags << 11);
3074                         }
3075                 } else {
3076                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3077                                 int tsflags;
3078
3079                                 tsflags = ((skb->nh.iph->ihl - 5) +
3080                                            (tcp_opt_len >> 2));
3081                                 base_flags |= tsflags << 12;
3082                         }
3083                 }
3084         }
3085 #else
3086         mss = 0;
3087 #endif
3088 #if TG3_VLAN_TAG_USED
3089         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3090                 base_flags |= (TXD_FLAG_VLAN |
3091                                (vlan_tx_tag_get(skb) << 16));
3092 #endif
3093
3094         /* Queue skb data, a.k.a. the main skb fragment. */
3095         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3096
3097         tp->tx_buffers[entry].skb = skb;
3098         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3099
3100         would_hit_hwbug = 0;
3101
3102         if (tg3_4g_overflow_test(mapping, len))
3103                 would_hit_hwbug = entry + 1;
3104
3105         tg3_set_txd(tp, entry, mapping, len, base_flags,
3106                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3107
3108         entry = NEXT_TX(entry);
3109
3110         /* Now loop through additional data fragments, and queue them. */
3111         if (skb_shinfo(skb)->nr_frags > 0) {
3112                 unsigned int i, last;
3113
3114                 last = skb_shinfo(skb)->nr_frags - 1;
3115                 for (i = 0; i <= last; i++) {
3116                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3117
3118                         len = frag->size;
3119                         mapping = pci_map_page(tp->pdev,
3120                                                frag->page,
3121                                                frag->page_offset,
3122                                                len, PCI_DMA_TODEVICE);
3123
3124                         tp->tx_buffers[entry].skb = NULL;
3125                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3126
3127                         if (tg3_4g_overflow_test(mapping, len)) {
3128                                 /* Only one should match. */
3129                                 if (would_hit_hwbug)
3130                                         BUG();
3131                                 would_hit_hwbug = entry + 1;
3132                         }
3133
3134                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3135                                 tg3_set_txd(tp, entry, mapping, len,
3136                                             base_flags, (i == last)|(mss << 1));
3137                         else
3138                                 tg3_set_txd(tp, entry, mapping, len,
3139                                             base_flags, (i == last));
3140
3141                         entry = NEXT_TX(entry);
3142                 }
3143         }
3144
3145         if (would_hit_hwbug) {
3146                 u32 last_plus_one = entry;
3147                 u32 start;
3148                 unsigned int len = 0;
3149
3150                 would_hit_hwbug -= 1;
3151                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3152                 entry &= (TG3_TX_RING_SIZE - 1);
3153                 start = entry;
3154                 i = 0;
3155                 while (entry != last_plus_one) {
3156                         if (i == 0)
3157                                 len = skb_headlen(skb);
3158                         else
3159                                 len = skb_shinfo(skb)->frags[i-1].size;
3160
3161                         if (entry == would_hit_hwbug)
3162                                 break;
3163
3164                         i++;
3165                         entry = NEXT_TX(entry);
3166
3167                 }
3168
3169                 /* If the workaround fails due to memory/mapping
3170                  * failure, silently drop this packet.
3171                  */
3172                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3173                                                 entry, len,
3174                                                 last_plus_one,
3175                                                 &start, mss))
3176                         goto out_unlock;
3177
3178                 entry = start;
3179         }
3180
3181         /* Packets are ready, update the Tx producer idx locally and on the card. */
3182         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3183
3184         tp->tx_prod = entry;
3185         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3186                 netif_stop_queue(dev);
3187
3188 out_unlock:
3189         spin_unlock_irqrestore(&tp->tx_lock, flags);
3190
3191         dev->trans_start = jiffies;
3192
3193         return NETDEV_TX_OK;
3194 }
3195
3196 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3197                                int new_mtu)
3198 {
3199         dev->mtu = new_mtu;
3200
3201         if (new_mtu > ETH_DATA_LEN)
3202                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3203         else
3204                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3205 }
3206
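/* MTU change handler.  Validates the new size; if the device is running,
 * the chip is halted and re-initialized so the RX setup matches the new
 * (possibly jumbo) frame size.
 */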
3207 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3208 {
3209         struct tg3 *tp = netdev_priv(dev);
3210
3211         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3212                 return -EINVAL;
3213
3214         if (!netif_running(dev)) {
3215                 /* We'll just catch it later when the
3216                  * device is brought up.
3217                  */
3218                 tg3_set_mtu(dev, tp, new_mtu);
3219                 return 0;
3220         }
3221
3222         tg3_netif_stop(tp);
3223         spin_lock_irq(&tp->lock);
3224         spin_lock(&tp->tx_lock);
3225
3226         tg3_halt(tp);
3227
3228         tg3_set_mtu(dev, tp, new_mtu);
3229
3230         tg3_init_hw(tp);
3231
3232         tg3_netif_start(tp);
3233
3234         spin_unlock(&tp->tx_lock);
3235         spin_unlock_irq(&tp->lock);
3236
3237         return 0;
3238 }
3239
3240 /* Free up pending packets in all rx/tx rings.
3241  *
3242  * The chip has been shut down and the driver detached from
3243  * the networking stack, so no interrupts or new tx packets will
3244  * end up in the driver.  tp->{tx,}lock is not held and we are not
3245  * in an interrupt context and thus may sleep.
3246  */
3247 static void tg3_free_rings(struct tg3 *tp)
3248 {
3249         struct ring_info *rxp;
3250         int i;
3251
3252         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3253                 rxp = &tp->rx_std_buffers[i];
3254
3255                 if (rxp->skb == NULL)
3256                         continue;
3257                 pci_unmap_single(tp->pdev,
3258                                  pci_unmap_addr(rxp, mapping),
3259                                  RX_PKT_BUF_SZ - tp->rx_offset,
3260                                  PCI_DMA_FROMDEVICE);
3261                 dev_kfree_skb_any(rxp->skb);
3262                 rxp->skb = NULL;
3263         }
3264
3265         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3266                 rxp = &tp->rx_jumbo_buffers[i];
3267
3268                 if (rxp->skb == NULL)
3269                         continue;
3270                 pci_unmap_single(tp->pdev,
3271                                  pci_unmap_addr(rxp, mapping),
3272                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3273                                  PCI_DMA_FROMDEVICE);
3274                 dev_kfree_skb_any(rxp->skb);
3275                 rxp->skb = NULL;
3276         }
3277
3278         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3279                 struct tx_ring_info *txp;
3280                 struct sk_buff *skb;
3281                 int j;
3282
3283                 txp = &tp->tx_buffers[i];
3284                 skb = txp->skb;
3285
3286                 if (skb == NULL) {
3287                         i++;
3288                         continue;
3289                 }
3290
3291                 pci_unmap_single(tp->pdev,
3292                                  pci_unmap_addr(txp, mapping),
3293                                  skb_headlen(skb),
3294                                  PCI_DMA_TODEVICE);
3295                 txp->skb = NULL;
3296
3297                 i++;
3298
3299                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3300                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3301                         pci_unmap_page(tp->pdev,
3302                                        pci_unmap_addr(txp, mapping),
3303                                        skb_shinfo(skb)->frags[j].size,
3304                                        PCI_DMA_TODEVICE);
3305                         i++;
3306                 }
3307
3308                 dev_kfree_skb_any(skb);
3309         }
3310 }
3311
3312 /* Initialize tx/rx rings for packet processing.
3313  *
3314  * The chip has been shut down and the driver detached from
3315  * the networking stack, so no interrupts or new tx packets will
3316  * end up in the driver.  tp->{tx,}lock are held and thus
3317  * we may not sleep.
3318  */
3319 static void tg3_init_rings(struct tg3 *tp)
3320 {
3321         u32 i;
3322
3323         /* Free up all the SKBs. */
3324         tg3_free_rings(tp);
3325
3326         /* Zero out all descriptors. */
3327         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3328         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3329         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3330         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3331
3332         /* Initialize invariants of the rings; we only set this
3333          * stuff once.  This works because the card does not
3334          * write into the rx buffer posting rings.
3335          */
3336         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3337                 struct tg3_rx_buffer_desc *rxd;
3338
3339                 rxd = &tp->rx_std[i];
3340                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3341                         << RXD_LEN_SHIFT;
3342                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3343                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3344                                (i << RXD_OPAQUE_INDEX_SHIFT));
3345         }
3346
3347         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3348                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3349                         struct tg3_rx_buffer_desc *rxd;
3350
3351                         rxd = &tp->rx_jumbo[i];
3352                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3353                                 << RXD_LEN_SHIFT;
3354                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3355                                 RXD_FLAG_JUMBO;
3356                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3357                                (i << RXD_OPAQUE_INDEX_SHIFT));
3358                 }
3359         }
3360
3361         /* Now allocate fresh SKBs for each rx ring. */
3362         for (i = 0; i < tp->rx_pending; i++) {
3363                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3364                                      -1, i) < 0)
3365                         break;
3366         }
3367
3368         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3369                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3370                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3371                                              -1, i) < 0)
3372                                 break;
3373                 }
3374         }
3375 }
3376
3377 /*
3378  * Must not be invoked with interrupt sources disabled and
3379  * the hardware shut down.
3380  */
3381 static void tg3_free_consistent(struct tg3 *tp)
3382 {
3383         if (tp->rx_std_buffers) {
3384                 kfree(tp->rx_std_buffers);
3385                 tp->rx_std_buffers = NULL;
3386         }
3387         if (tp->rx_std) {
3388                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3389                                     tp->rx_std, tp->rx_std_mapping);
3390                 tp->rx_std = NULL;
3391         }
3392         if (tp->rx_jumbo) {
3393                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3394                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3395                 tp->rx_jumbo = NULL;
3396         }
3397         if (tp->rx_rcb) {
3398                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3399                                     tp->rx_rcb, tp->rx_rcb_mapping);
3400                 tp->rx_rcb = NULL;
3401         }
3402         if (tp->tx_ring) {
3403                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3404                         tp->tx_ring, tp->tx_desc_mapping);
3405                 tp->tx_ring = NULL;
3406         }
3407         if (tp->hw_status) {
3408                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3409                                     tp->hw_status, tp->status_mapping);
3410                 tp->hw_status = NULL;
3411         }
3412         if (tp->hw_stats) {
3413                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3414                                     tp->hw_stats, tp->stats_mapping);
3415                 tp->hw_stats = NULL;
3416         }
3417 }
3418
3419 /*
3420  * Must not be invoked with interrupt sources disabled and
3421  * the hardware shut down.  Can sleep.
3422  */
3423 static int tg3_alloc_consistent(struct tg3 *tp)
3424 {
3425         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3426                                       (TG3_RX_RING_SIZE +
3427                                        TG3_RX_JUMBO_RING_SIZE)) +
3428                                      (sizeof(struct tx_ring_info) *
3429                                       TG3_TX_RING_SIZE),
3430                                      GFP_KERNEL);
3431         if (!tp->rx_std_buffers)
3432                 return -ENOMEM;
3433
3434         memset(tp->rx_std_buffers, 0,
3435                (sizeof(struct ring_info) *
3436                 (TG3_RX_RING_SIZE +
3437                  TG3_RX_JUMBO_RING_SIZE)) +
3438                (sizeof(struct tx_ring_info) *
3439                 TG3_TX_RING_SIZE));
3440
3441         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3442         tp->tx_buffers = (struct tx_ring_info *)
3443                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3444
3445         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3446                                           &tp->rx_std_mapping);
3447         if (!tp->rx_std)
3448                 goto err_out;
3449
3450         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3451                                             &tp->rx_jumbo_mapping);
3452
3453         if (!tp->rx_jumbo)
3454                 goto err_out;
3455
3456         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3457                                           &tp->rx_rcb_mapping);
3458         if (!tp->rx_rcb)
3459                 goto err_out;
3460
3461         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3462                                            &tp->tx_desc_mapping);
3463         if (!tp->tx_ring)
3464                 goto err_out;
3465
3466         tp->hw_status = pci_alloc_consistent(tp->pdev,
3467                                              TG3_HW_STATUS_SIZE,
3468                                              &tp->status_mapping);
3469         if (!tp->hw_status)
3470                 goto err_out;
3471
3472         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3473                                             sizeof(struct tg3_hw_stats),
3474                                             &tp->stats_mapping);
3475         if (!tp->hw_stats)
3476                 goto err_out;
3477
3478         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3479         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3480
3481         return 0;
3482
3483 err_out:
3484         tg3_free_consistent(tp);
3485         return -ENOMEM;
3486 }
3487
3488 #define MAX_WAIT_CNT 1000
3489
3490 /* To stop a block, clear the enable bit and poll till it
3491  * clears.  tp->lock is held.
3492  */
3493 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3494 {
3495         unsigned int i;
3496         u32 val;
3497
3498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3499             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3500                 switch (ofs) {
3501                 case RCVLSC_MODE:
3502                 case DMAC_MODE:
3503                 case MBFREE_MODE:
3504                 case BUFMGR_MODE:
3505                 case MEMARB_MODE:
3506                         /* We can't enable/disable these bits of the
3507                          * 5705/5750, just say success.
3508                          */
3509                         return 0;
3510
3511                 default:
3512                         break;
3513                 };
3514         }
3515
3516         val = tr32(ofs);
3517         val &= ~enable_bit;
3518         tw32_f(ofs, val);
3519
3520         for (i = 0; i < MAX_WAIT_CNT; i++) {
3521                 udelay(100);
3522                 val = tr32(ofs);
3523                 if ((val & enable_bit) == 0)
3524                         break;
3525         }
3526
3527         if (i == MAX_WAIT_CNT) {
3528                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3529                        "ofs=%lx enable_bit=%x\n",
3530                        ofs, enable_bit);
3531                 return -ENODEV;
3532         }
3533
3534         return 0;
3535 }
3536
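/* Quiesce the chip: disable interrupts, stop the receive/transmit and DMA
 * engine blocks one by one, and clear the host status and statistics
 * blocks.
 */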
3537 /* tp->lock is held. */
3538 static int tg3_abort_hw(struct tg3 *tp)
3539 {
3540         int i, err;
3541
3542         tg3_disable_ints(tp);
3543
3544         tp->rx_mode &= ~RX_MODE_ENABLE;
3545         tw32_f(MAC_RX_MODE, tp->rx_mode);
3546         udelay(10);
3547
3548         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3549         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3550         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3551         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3552         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3553         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3554
3555         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3556         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3557         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3558         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3559         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3560         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3561         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3562         if (err)
3563                 goto out;
3564
3565         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3566         tw32_f(MAC_MODE, tp->mac_mode);
3567         udelay(40);
3568
3569         tp->tx_mode &= ~TX_MODE_ENABLE;
3570         tw32_f(MAC_TX_MODE, tp->tx_mode);
3571
3572         for (i = 0; i < MAX_WAIT_CNT; i++) {
3573                 udelay(100);
3574                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3575                         break;
3576         }
3577         if (i >= MAX_WAIT_CNT) {
3578                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3579                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3580                        tp->dev->name, tr32(MAC_TX_MODE));
3581                 return -ENODEV;
3582         }
3583
3584         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3585         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3586         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3587
3588         tw32(FTQ_RESET, 0xffffffff);
3589         tw32(FTQ_RESET, 0x00000000);
3590
3591         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3592         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3593         if (err)
3594                 goto out;
3595
3596         if (tp->hw_status)
3597                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3598         if (tp->hw_stats)
3599                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3600
3601 out:
3602         return err;
3603 }
3604
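/* Acquire the NVRAM software arbitration semaphore, giving the firmware
 * up to ~160ms to grant it.
 */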
3605 /* tp->lock is held. */
3606 static int tg3_nvram_lock(struct tg3 *tp)
3607 {
3608         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3609                 int i;
3610
3611                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3612                 for (i = 0; i < 8000; i++) {
3613                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3614                                 break;
3615                         udelay(20);
3616                 }
3617                 if (i == 8000)
3618                         return -ENODEV;
3619         }
3620         return 0;
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_nvram_unlock(struct tg3 *tp)
3625 {
3626         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3627                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3628 }
3629
3630 /* tp->lock is held. */
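/* Post the firmware mailbox magic and, when the new ASF handshake is in
 * use, tell the management firmware what kind of reset is about to happen.
 */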
3631 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3632 {
3633         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3634                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3635
3636         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3637                 switch (kind) {
3638                 case RESET_KIND_INIT:
3639                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3640                                       DRV_STATE_START);
3641                         break;
3642
3643                 case RESET_KIND_SHUTDOWN:
3644                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3645                                       DRV_STATE_UNLOAD);
3646                         break;
3647
3648                 case RESET_KIND_SUSPEND:
3649                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3650                                       DRV_STATE_SUSPEND);
3651                         break;
3652
3653                 default:
3654                         break;
3655                 };
3656         }
3657 }
3658
3659 /* tp->lock is held. */
3660 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3661 {
3662         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3663                 switch (kind) {
3664                 case RESET_KIND_INIT:
3665                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3666                                       DRV_STATE_START_DONE);
3667                         break;
3668
3669                 case RESET_KIND_SHUTDOWN:
3670                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3671                                       DRV_STATE_UNLOAD_DONE);
3672                         break;
3673
3674                 default:
3675                         break;
3676                 };
3677         }
3678 }
3679
3680 /* tp->lock is held. */
3681 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3682 {
3683         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3684                 switch (kind) {
3685                 case RESET_KIND_INIT:
3686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3687                                       DRV_STATE_START);
3688                         break;
3689
3690                 case RESET_KIND_SHUTDOWN:
3691                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3692                                       DRV_STATE_UNLOAD);
3693                         break;
3694
3695                 case RESET_KIND_SUSPEND:
3696                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3697                                       DRV_STATE_SUSPEND);
3698                         break;
3699
3700                 default:
3701                         break;
3702                 };
3703         }
3704 }
3705
3706 static void tg3_stop_fw(struct tg3 *);
3707
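/* Reset the chip core.  Issues a core-clock reset, restores the PCI state
 * needed to talk to the device again, and waits for the on-chip firmware
 * to signal that it has finished re-initializing.
 */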
3708 /* tp->lock is held. */
3709 static int tg3_chip_reset(struct tg3 *tp)
3710 {
3711         u32 val;
3712         u32 flags_save;
3713         int i;
3714
3715         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3716                 tg3_nvram_lock(tp);
3717
3718         /*
3719          * We must avoid the readl() that normally takes place.
3720          * It locks up machines, causes machine checks, and other
3721          * fun things.  So, temporarily disable the 5701
3722          * hardware workaround while we do the reset.
3723          */
3724         flags_save = tp->tg3_flags;
3725         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3726
3727         /* do the reset */
3728         val = GRC_MISC_CFG_CORECLK_RESET;
3729
3730         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3731                 if (tr32(0x7e2c) == 0x60) {
3732                         tw32(0x7e2c, 0x20);
3733                 }
3734                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3735                         tw32(GRC_MISC_CFG, (1 << 29));
3736                         val |= (1 << 29);
3737                 }
3738         }
3739
3740         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3741             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3742                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3743         tw32(GRC_MISC_CFG, val);
3744
3745         /* restore 5701 hardware bug workaround flag */
3746         tp->tg3_flags = flags_save;
3747
3748         /* Unfortunately, we have to delay before the PCI read back.
3749          * Some 575X chips will not even respond to a PCI cfg access
3750          * when the reset command is given to the chip.
3751          *
3752          * How do these hardware designers expect things to work
3753          * properly if the PCI write is posted for a long period
3754          * of time?  It is always necessary to have some method by
3755          * which a register read back can occur to push out the
3756          * write that does the reset.
3757          *
3758          * For most tg3 variants the trick below was working.
3759          * Ho hum...
3760          */
3761         udelay(120);
3762
3763         /* Flush PCI posted writes.  The normal MMIO registers
3764          * are inaccessible at this time so this is the only
3765          * way to do this reliably (actually, this is no longer
3766          * the case, see above).  I tried to use indirect
3767          * register read/write but this upset some 5701 variants.
3768          */
3769         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3770
3771         udelay(120);
3772
3773         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3774                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3775                         int i;
3776                         u32 cfg_val;
3777
3778                         /* Wait for link training to complete.  */
3779                         for (i = 0; i < 5000; i++)
3780                                 udelay(100);
3781
3782                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3783                         pci_write_config_dword(tp->pdev, 0xc4,
3784                                                cfg_val | (1 << 15));
3785                 }
3786                 /* Set PCIE max payload size and clear error status.  */
3787                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3788         }
3789
3790         /* Re-enable indirect register accesses. */
3791         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3792                                tp->misc_host_ctrl);
3793
3794         /* Set MAX PCI retry to zero. */
3795         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3796         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3797             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3798                 val |= PCISTATE_RETRY_SAME_DMA;
3799         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3800
3801         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3802
3803         /* Make sure PCI-X relaxed ordering bit is clear. */
3804         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3805         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3806         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3807
3808         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3809
3810         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3811                 tg3_stop_fw(tp);
3812                 tw32(0x5000, 0x400);
3813         }
3814
3815         tw32(GRC_MODE, tp->grc_mode);
3816
3817         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3818                 u32 val = tr32(0xc4);
3819
3820                 tw32(0xc4, val | (1 << 15));
3821         }
3822
3823         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3825                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3826                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3827                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3828                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3829         }
3830
3831         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3832                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3833                 tw32_f(MAC_MODE, tp->mac_mode);
3834         } else
3835                 tw32_f(MAC_MODE, 0);
3836         udelay(40);
3837
3838         /* Wait for firmware initialization to complete. */
3839         for (i = 0; i < 100000; i++) {
3840                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3841                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3842                         break;
3843                 udelay(10);
3844         }
3845         if (i >= 100000 &&
3846             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3847                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3848                        "firmware will not restart magic=%08x\n",
3849                        tp->dev->name, val);
3850                 return -ENODEV;
3851         }
3852
3853         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3854             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3855                 u32 val = tr32(0x7c00);
3856
3857                 tw32(0x7c00, val | (1 << 25));
3858         }
3859
3860         /* Reprobe ASF enable state.  */
3861         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3862         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3863         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3864         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3865                 u32 nic_cfg;
3866
3867                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3868                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3869                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3870                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3871                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3872                 }
3873         }
3874
3875         return 0;
3876 }
3877
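/* If ASF is enabled, ask the firmware to pause by posting
 * FWCMD_NICDRV_PAUSE_FW and raising the RX CPU event, then briefly wait
 * for the firmware to ACK.
 */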
3878 /* tp->lock is held. */
3879 static void tg3_stop_fw(struct tg3 *tp)
3880 {
3881         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3882                 u32 val;
3883                 int i;
3884
3885                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3886                 val = tr32(GRC_RX_CPU_EVENT);
3887                 val |= (1 << 14);
3888                 tw32(GRC_RX_CPU_EVENT, val);
3889
3890                 /* Wait for RX cpu to ACK the event.  */
3891                 for (i = 0; i < 100; i++) {
3892                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3893                                 break;
3894                         udelay(1);
3895                 }
3896         }
3897 }
3898
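/* Full shutdown: stop the firmware, quiesce the hardware, and reset the
 * chip, writing the shutdown signatures before and after the reset.
 */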
3899 /* tp->lock is held. */
3900 static int tg3_halt(struct tg3 *tp)
3901 {
3902         int err;
3903
3904         tg3_stop_fw(tp);
3905
3906         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3907
3908         tg3_abort_hw(tp);
3909         err = tg3_chip_reset(tp);
3910
3911         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3912         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3913
3914         if (err)
3915                 return err;
3916
3917         return 0;
3918 }
3919
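/* Layout constants and image data for the RX/TX CPU firmware used by
 * tg3_load_5701_a0_firmware_fix() below.
 */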
3920 #define TG3_FW_RELEASE_MAJOR    0x0
3921 #define TG3_FW_RELASE_MINOR     0x0
3922 #define TG3_FW_RELEASE_FIX      0x0
3923 #define TG3_FW_START_ADDR       0x08000000
3924 #define TG3_FW_TEXT_ADDR        0x08000000
3925 #define TG3_FW_TEXT_LEN         0x9c0
3926 #define TG3_FW_RODATA_ADDR      0x080009c0
3927 #define TG3_FW_RODATA_LEN       0x60
3928 #define TG3_FW_DATA_ADDR        0x08000a40
3929 #define TG3_FW_DATA_LEN         0x20
3930 #define TG3_FW_SBSS_ADDR        0x08000a60
3931 #define TG3_FW_SBSS_LEN         0xc
3932 #define TG3_FW_BSS_ADDR         0x08000a70
3933 #define TG3_FW_BSS_LEN          0x10
3934
3935 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3936         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3937         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3938         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3939         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3940         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3941         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3942         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3943         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3944         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3945         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3946         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3947         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3948         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3949         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3950         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3951         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3952         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3953         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3954         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3955         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3956         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3957         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3958         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3959         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3960         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3961         0, 0, 0, 0, 0, 0,
3962         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3963         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3964         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3965         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3966         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3967         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3968         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3969         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3970         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3971         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3972         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3973         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3974         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3975         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3976         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3977         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3978         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3979         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3980         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3981         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3982         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3983         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3984         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3985         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3986         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3987         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3988         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3989         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3990         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3991         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3992         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3993         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3994         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3995         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3996         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3997         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3998         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3999         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4000         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4001         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4002         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4003         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4004         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4005         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4006         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4007         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4008         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4009         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4010         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4011         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4012         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4013         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4014         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4015         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4016         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4017         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4018         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4019         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4020         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4021         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4022         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4023         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4024         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4025         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4026         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4027 };
4028
4029 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4030         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4031         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4032         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4033         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4034         0x00000000
4035 };
4036
4037 #if 0 /* All zeros, don't eat up space with it. */
4038 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4039         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4040         0x00000000, 0x00000000, 0x00000000, 0x00000000
4041 };
4042 #endif
4043
4044 #define RX_CPU_SCRATCH_BASE     0x30000
4045 #define RX_CPU_SCRATCH_SIZE     0x04000
4046 #define TX_CPU_SCRATCH_BASE     0x34000
4047 #define TX_CPU_SCRATCH_SIZE     0x04000
4048
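/* Halt the embedded RX or TX CPU by writing CPU_MODE_HALT and polling
 * until the halt takes effect.
 */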
4049 /* tp->lock is held. */
4050 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4051 {
4052         int i;
4053
4054         if (offset == TX_CPU_BASE &&
4055             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4056                 BUG();
4057
4058         if (offset == RX_CPU_BASE) {
4059                 for (i = 0; i < 10000; i++) {
4060                         tw32(offset + CPU_STATE, 0xffffffff);
4061                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4062                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4063                                 break;
4064                 }
4065
4066                 tw32(offset + CPU_STATE, 0xffffffff);
4067                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4068                 udelay(10);
4069         } else {
4070                 for (i = 0; i < 10000; i++) {
4071                         tw32(offset + CPU_STATE, 0xffffffff);
4072                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4073                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4074                                 break;
4075                 }
4076         }
4077
4078         if (i >= 10000) {
4079                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4080                        "and %s CPU\n",
4081                        tp->dev->name,
4082                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4083                 return -ENODEV;
4084         }
4085         return 0;
4086 }
4087
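/* Describes a firmware image: base address, length, and data pointer for
 * each of its text, rodata, and data sections.
 */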
4088 struct fw_info {
4089         unsigned int text_base;
4090         unsigned int text_len;
4091         u32 *text_data;
4092         unsigned int rodata_base;
4093         unsigned int rodata_len;
4094         u32 *rodata_data;
4095         unsigned int data_base;
4096         unsigned int data_len;
4097         u32 *data_data;
4098 };
4099
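/* Halt the target CPU, clear its scratch memory, and copy a firmware
 * image's text, rodata, and data sections into it.
 */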
4100 /* tp->lock is held. */
4101 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4102                                  int cpu_scratch_size, struct fw_info *info)
4103 {
4104         int err, i;
4105         u32 orig_tg3_flags = tp->tg3_flags;
4106         void (*write_op)(struct tg3 *, u32, u32);
4107
4108         if (cpu_base == TX_CPU_BASE &&
4109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4110                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4111                        "TX cpu firmware on %s which is 5705.\n",
4112                        tp->dev->name);
4113                 return -EINVAL;
4114         }
4115
4116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4117                 write_op = tg3_write_mem;
4118         else
4119                 write_op = tg3_write_indirect_reg32;
4120
4121         /* Force use of PCI config space for indirect register
4122          * write calls.
4123          */
4124         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4125
4126         err = tg3_halt_cpu(tp, cpu_base);
4127         if (err)
4128                 goto out;
4129
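        /* Zero the CPU scratch area, then copy each firmware section to its
         * offset within the scratch area (the low 16 bits of its load address).
         */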
4130         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4131                 write_op(tp, cpu_scratch_base + i, 0);
4132         tw32(cpu_base + CPU_STATE, 0xffffffff);
4133         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4134         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4135                 write_op(tp, (cpu_scratch_base +
4136                               (info->text_base & 0xffff) +
4137                               (i * sizeof(u32))),
4138                          (info->text_data ?
4139                           info->text_data[i] : 0));
4140         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4141                 write_op(tp, (cpu_scratch_base +
4142                               (info->rodata_base & 0xffff) +
4143                               (i * sizeof(u32))),
4144                          (info->rodata_data ?
4145                           info->rodata_data[i] : 0));
4146         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4147                 write_op(tp, (cpu_scratch_base +
4148                               (info->data_base & 0xffff) +
4149                               (i * sizeof(u32))),
4150                          (info->data_data ?
4151                           info->data_data[i] : 0));
4152
4153         err = 0;
4154
4155 out:
4156         tp->tg3_flags = orig_tg3_flags;
4157         return err;
4158 }
4159
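/* Load the 5701 A0 fix-up firmware into both CPU scratch areas, then
 * start only the RX CPU; the TX CPU is left halted.
 */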
4160 /* tp->lock is held. */
4161 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4162 {
4163         struct fw_info info;
4164         int err, i;
4165
4166         info.text_base = TG3_FW_TEXT_ADDR;
4167         info.text_len = TG3_FW_TEXT_LEN;
4168         info.text_data = &tg3FwText[0];
4169         info.rodata_base = TG3_FW_RODATA_ADDR;
4170         info.rodata_len = TG3_FW_RODATA_LEN;
4171         info.rodata_data = &tg3FwRodata[0];
4172         info.data_base = TG3_FW_DATA_ADDR;
4173         info.data_len = TG3_FW_DATA_LEN;
4174         info.data_data = NULL;
4175
4176         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4177                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4178                                     &info);
4179         if (err)
4180                 return err;
4181
4182         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4183                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4184                                     &info);
4185         if (err)
4186                 return err;
4187
4188         /* Now start up only the RX CPU. */
4189         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4190         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4191
4192         for (i = 0; i < 5; i++) {
4193                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4194                         break;
4195                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4196                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4197                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4198                 udelay(1000);
4199         }
4200         if (i >= 5) {
4201                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4202                        "to set RX CPU PC: is %08x, should be %08x\n",
4203                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4204                        TG3_FW_TEXT_ADDR);
4205                 return -ENODEV;
4206         }
4207         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4208         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4209
4210         return 0;
4211 }
4212
4213 #if TG3_TSO_SUPPORT != 0
4214
4215 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4216 #define TG3_TSO_FW_RELASE_MINOR         0x6
4217 #define TG3_TSO_FW_RELEASE_FIX          0x0
4218 #define TG3_TSO_FW_START_ADDR           0x08000000
4219 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4220 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4221 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4222 #define TG3_TSO_FW_RODATA_LEN           0x60
4223 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4224 #define TG3_TSO_FW_DATA_LEN             0x30
4225 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4226 #define TG3_TSO_FW_SBSS_LEN             0x2c
4227 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4228 #define TG3_TSO_FW_BSS_LEN              0x894
4229
4230 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4231         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4232         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4233         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4234         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4235         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4236         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4237         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4238         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4239         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4240         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4241         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4242         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4243         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4244         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4245         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4246         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4247         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4248         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4249         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4250         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4251         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4252         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4253         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4254         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4255         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4256         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4257         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4258         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4259         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4260         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4261         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4262         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4263         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4264         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4265         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4266         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4267         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4268         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4269         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4270         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4271         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4272         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4273         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4274         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4275         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4276         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4277         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4278         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4279         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4280         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4281         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4282         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4283         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4284         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4285         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4286         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4287         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4288         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4289         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4290         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4291         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4292         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4293         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4294         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4295         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4296         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4297         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4298         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4299         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4300         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4301         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4302         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4303         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4304         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4305         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4306         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4307         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4308         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4309         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4310         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4311         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4312         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4313         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4314         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4315         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4316         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4317         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4318         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4319         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4320         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4321         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4322         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4323         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4324         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4325         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4326         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4327         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4328         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4329         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4330         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4331         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4332         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4333         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4334         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4335         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4336         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4337         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4338         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4339         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4340         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4341         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4342         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4343         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4344         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4345         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4346         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4347         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4348         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4349         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4350         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4351         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4352         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4353         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4354         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4355         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4356         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4357         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4358         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4359         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4360         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4361         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4362         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4363         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4364         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4365         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4366         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4367         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4368         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4369         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4370         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4371         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4372         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4373         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4374         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4375         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4376         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4377         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4378         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4379         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4380         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4381         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4382         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4383         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4384         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4385         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4386         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4387         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4388         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4389         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4390         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4391         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4392         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4393         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4394         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4395         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4396         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4397         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4398         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4399         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4400         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4401         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4402         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4403         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4404         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4405         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4406         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4407         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4408         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4409         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4410         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4411         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4412         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4413         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4414         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4415         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4416         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4417         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4418         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4419         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4420         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4421         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4422         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4423         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4424         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4425         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4426         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4427         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4428         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4429         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4430         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4431         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4432         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4433         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4434         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4435         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4436         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4437         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4438         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4439         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4440         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4441         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4442         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4443         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4444         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4445         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4446         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4447         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4448         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4449         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4450         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4451         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4452         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4453         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4454         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4455         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4456         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4457         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4458         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4459         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4460         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4461         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4462         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4463         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4464         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4465         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4466         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4467         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4468         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4469         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4470         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4471         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4472         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4473         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4474         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4475         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4476         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4477         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4478         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4479         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4480         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4481         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4482         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4483         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4484         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4485         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4486         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4487         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4488         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4489         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4490         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4491         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4492         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4493         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4494         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4495         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4496         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4497         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4498         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4499         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4500         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4501         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4502         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4503         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4504         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4505         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4506         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4507         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4508         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4509         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4510         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4511         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4512         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4513         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4514         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4515 };
4516
4517 static u32 tg3TsoFwRodata[(TG3_TSO_FW_RODATA_LEN / 4) + 1] = {
4518         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4519         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4520         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4521         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4522         0x00000000,
4523 };
4524
4525 static u32 tg3TsoFwData[(TG3_TSO_FW_DATA_LEN / 4) + 1] = {
4526         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4527         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4528         0x00000000,
4529 };
4530
4531 /* 5705 needs a special version of the TSO firmware.  */
4532 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4533 #define TG3_TSO5_FW_RELASE_MINOR        0x2
4534 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4535 #define TG3_TSO5_FW_START_ADDR          0x00010000
4536 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4537 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4538 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4539 #define TG3_TSO5_FW_RODATA_LEN          0x50
4540 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4541 #define TG3_TSO5_FW_DATA_LEN            0x20
4542 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4543 #define TG3_TSO5_FW_SBSS_LEN            0x28
4544 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4545 #define TG3_TSO5_FW_BSS_LEN             0x88
4546
4547 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4548         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4549         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4550         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4551         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4552         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4553         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4554         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4555         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4556         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4557         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4558         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4559         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4560         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4561         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4562         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4563         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4564         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4565         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4566         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4567         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4568         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4569         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4570         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4571         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4572         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4573         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4574         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4575         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4576         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4577         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4578         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4579         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4580         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4581         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4582         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4583         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4584         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4585         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4586         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4587         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4588         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4589         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4590         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4591         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4592         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4593         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4594         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4595         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4596         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4597         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4598         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4599         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4600         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4601         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4602         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4603         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4604         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4605         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4606         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4607         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4608         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4609         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4610         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4611         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4612         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4613         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4614         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4615         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4616         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4617         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4618         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4619         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4620         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4621         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4622         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4623         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4624         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4625         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4626         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4627         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4628         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4629         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4630         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4631         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4632         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4633         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4634         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4635         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4636         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4637         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4638         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4639         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4640         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4641         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4642         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4643         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4644         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4645         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4646         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4647         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4648         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4649         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4650         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4651         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4652         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4653         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4654         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4655         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4656         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4657         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4658         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4659         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4660         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4661         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4662         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4663         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4664         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4665         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4666         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4667         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4668         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4669         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4670         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4671         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4672         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4673         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4674         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4675         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4676         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4677         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4678         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4679         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4680         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4681         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4682         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4683         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4684         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4685         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4686         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4687         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4688         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4689         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4690         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4691         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4692         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4693         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4694         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4695         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4696         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4697         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4698         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4699         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4700         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4701         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4702         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4703         0x00000000, 0x00000000, 0x00000000,
4704 };
4705
4706 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4707         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4708         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4709         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4710         0x00000000, 0x00000000, 0x00000000,
4711 };
4712
4713 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4714         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4715         0x00000000, 0x00000000, 0x00000000,
4716 };
4717
4718 /* tp->lock is held. */
4719 static int tg3_load_tso_firmware(struct tg3 *tp)
4720 {
4721         struct fw_info info;
4722         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4723         int err, i;
4724
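        /* The 5750 does not use this TSO firmware, so there is nothing to load. */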
4725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4726                 return 0;
4727
4728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
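                /* On 5705 the TSO firmware runs on the RX CPU and is staged at
                 * the start of the MBUF pool; tg3_reset_hw() shrinks the pool
                 * by the firmware footprint to make room for it.
                 */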
4729                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4730                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4731                 info.text_data = &tg3Tso5FwText[0];
4732                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4733                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4734                 info.rodata_data = &tg3Tso5FwRodata[0];
4735                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4736                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4737                 info.data_data = &tg3Tso5FwData[0];
4738                 cpu_base = RX_CPU_BASE;
4739                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4740                 cpu_scratch_size = (info.text_len +
4741                                     info.rodata_len +
4742                                     info.data_len +
4743                                     TG3_TSO5_FW_SBSS_LEN +
4744                                     TG3_TSO5_FW_BSS_LEN);
4745         } else {
4746                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4747                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4748                 info.text_data = &tg3TsoFwText[0];
4749                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4750                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4751                 info.rodata_data = &tg3TsoFwRodata[0];
4752                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4753                 info.data_len = TG3_TSO_FW_DATA_LEN;
4754                 info.data_data = &tg3TsoFwData[0];
4755                 cpu_base = TX_CPU_BASE;
4756                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4757                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4758         }
4759
4760         err = tg3_load_firmware_cpu(tp, cpu_base,
4761                                     cpu_scratch_base, cpu_scratch_size,
4762                                     &info);
4763         if (err)
4764                 return err;
4765
4766         /* Now start up the CPU. */
4767         tw32(cpu_base + CPU_STATE, 0xffffffff);
4768         tw32_f(cpu_base + CPU_PC,    info.text_base);
4769
4770         for (i = 0; i < 5; i++) {
4771                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4772                         break;
4773                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4774                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4775                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4776                 udelay(1000);
4777         }
4778         if (i >= 5) {
4779                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
4780                        "to set CPU PC: is %08x, should be %08x\n",
4781                        tp->dev->name, tr32(cpu_base + CPU_PC),
4782                        info.text_base);
4783                 return -ENODEV;
4784         }
4785         tw32(cpu_base + CPU_STATE, 0xffffffff);
4786         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4787         return 0;
4788 }
4789
4790 #endif /* TG3_TSO_SUPPORT != 0 */
4791
4792 /* tp->lock is held. */
4793 static void __tg3_set_mac_addr(struct tg3 *tp)
4794 {
4795         u32 addr_high, addr_low;
4796         int i;
4797
4798         addr_high = ((tp->dev->dev_addr[0] << 8) |
4799                      tp->dev->dev_addr[1]);
4800         addr_low = ((tp->dev->dev_addr[2] << 24) |
4801                     (tp->dev->dev_addr[3] << 16) |
4802                     (tp->dev->dev_addr[4] <<  8) |
4803                     (tp->dev->dev_addr[5] <<  0));
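        /* Program the station address into all four MAC address slots. */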
4804         for (i = 0; i < 4; i++) {
4805                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4806                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4807         }
4808
4809         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4810             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4811             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4812                 for (i = 0; i < 12; i++) {
4813                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4814                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4815                 }
4816         }
4817
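        /* Seed the transmit backoff generator with the byte sum of the address. */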
4818         addr_high = (tp->dev->dev_addr[0] +
4819                      tp->dev->dev_addr[1] +
4820                      tp->dev->dev_addr[2] +
4821                      tp->dev->dev_addr[3] +
4822                      tp->dev->dev_addr[4] +
4823                      tp->dev->dev_addr[5]) &
4824                 TX_BACKOFF_SEED_MASK;
4825         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4826 }
4827
4828 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4829 {
4830         struct tg3 *tp = netdev_priv(dev);
4831         struct sockaddr *addr = p;
4832
4833         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4834
4835         spin_lock_irq(&tp->lock);
4836         __tg3_set_mac_addr(tp);
4837         spin_unlock_irq(&tp->lock);
4838
4839         return 0;
4840 }
4841
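/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host address of the
 * ring, its maxlen/flags word, and (except on 5705) the ring's NIC address.
 */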
4842 /* tp->lock is held. */
4843 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4844                            dma_addr_t mapping, u32 maxlen_flags,
4845                            u32 nic_addr)
4846 {
4847         tg3_write_mem(tp,
4848                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4849                       ((u64) mapping >> 32));
4850         tg3_write_mem(tp,
4851                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4852                       ((u64) mapping & 0xffffffff));
4853         tg3_write_mem(tp,
4854                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4855                        maxlen_flags);
4856
4857         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4858                 tg3_write_mem(tp,
4859                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4860                               nic_addr);
4861 }
4862
4863 static void __tg3_set_rx_mode(struct net_device *);
4864
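/* Reset and reinitialize the chip: quiesce interrupts and on-chip firmware,
 * reset the core, then reprogram the DMA engine, buffer manager and rings
 * from the configuration saved in *tp.
 */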
4865 /* tp->lock is held. */
4866 static int tg3_reset_hw(struct tg3 *tp)
4867 {
4868         u32 val, rdmac_mode;
4869         int i, err, limit;
4870
4871         tg3_disable_ints(tp);
4872
4873         tg3_stop_fw(tp);
4874
4875         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4876
4877         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4878                 err = tg3_abort_hw(tp);
4879                 if (err)
4880                         return err;
4881         }
4882
4883         err = tg3_chip_reset(tp);
4884         if (err)
4885                 return err;
4886
4887         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4888
4889         /* This works around an issue with Athlon chipsets on
4890          * B3 tigon3 silicon.  This bit has no effect on any
4891          * other revision.  But do not set this on PCI Express
4892          * chips.
4893          */
4894         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4895                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4896         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4897
4898         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4899             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4900                 val = tr32(TG3PCI_PCISTATE);
4901                 val |= PCISTATE_RETRY_SAME_DMA;
4902                 tw32(TG3PCI_PCISTATE, val);
4903         }
4904
4905         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4906                 /* Enable some hw fixes.  */
4907                 val = tr32(TG3PCI_MSI_DATA);
4908                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4909                 tw32(TG3PCI_MSI_DATA, val);
4910         }
4911
4912         /* Descriptor ring init may make accesses to the
4913          * NIC SRAM area to set up the TX descriptors, so we
4914          * can only do this after the hardware has been
4915          * successfully reset.
4916          */
4917         tg3_init_rings(tp);
4918
4919         /* This value is determined during the probe time DMA
4920          * engine test, tg3_test_dma.
4921          */
4922         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4923
4924         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4925                           GRC_MODE_4X_NIC_SEND_RINGS |
4926                           GRC_MODE_NO_TX_PHDR_CSUM |
4927                           GRC_MODE_NO_RX_PHDR_CSUM);
4928         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4929         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4930                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4931         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4932                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4933
4934         tw32(GRC_MODE,
4935              tp->grc_mode |
4936              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4937
4938         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
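        /* A prescaler value of 65 with the 66 MHz clock yields roughly a
         * 1 usec timer tick.
         */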
4939         val = tr32(GRC_MISC_CFG);
4940         val &= ~0xff;
4941         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4942         tw32(GRC_MISC_CFG, val);
4943
4944         /* Initialize MBUF/DESC pool. */
4945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4946                 /* Do nothing.  */
4947         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4948                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4949                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4950                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4951                 else
4952                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4953                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4954                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4955         }
4956 #if TG3_TSO_SUPPORT != 0
4957         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4958                 int fw_len;
4959
4960                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4961                           TG3_TSO5_FW_RODATA_LEN +
4962                           TG3_TSO5_FW_DATA_LEN +
4963                           TG3_TSO5_FW_SBSS_LEN +
4964                           TG3_TSO5_FW_BSS_LEN);
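                /* Round the firmware footprint up to a 0x80-byte boundary
                 * before carving it out of the MBUF pool.
                 */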
4965                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4966                 tw32(BUFMGR_MB_POOL_ADDR,
4967                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4968                 tw32(BUFMGR_MB_POOL_SIZE,
4969                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4970         }
4971 #endif
4972
4973         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4974                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4975                      tp->bufmgr_config.mbuf_read_dma_low_water);
4976                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4977                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4978                 tw32(BUFMGR_MB_HIGH_WATER,
4979                      tp->bufmgr_config.mbuf_high_water);
4980         } else {
4981                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4982                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4983                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4984                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4985                 tw32(BUFMGR_MB_HIGH_WATER,
4986                      tp->bufmgr_config.mbuf_high_water_jumbo);
4987         }
4988         tw32(BUFMGR_DMA_LOW_WATER,
4989              tp->bufmgr_config.dma_low_water);
4990         tw32(BUFMGR_DMA_HIGH_WATER,
4991              tp->bufmgr_config.dma_high_water);
4992
4993         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4994         for (i = 0; i < 2000; i++) {
4995                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4996                         break;
4997                 udelay(10);
4998         }
4999         if (i >= 2000) {
5000                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5001                        tp->dev->name);
5002                 return -ENODEV;
5003         }
5004
5005         /* Setup replenish threshold. */
5006         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5007
5008         /* Initialize TG3_BDINFO's at:
5009          *  RCVDBDI_STD_BD:     standard eth size rx ring
5010          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5011          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5012          *
5013          * like so:
5014          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5015          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5016          *                              ring attribute flags
5017          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5018          *
5019          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5020          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5021          *
5022          * The size of each ring is fixed in the firmware, but the location is
5023          * configurable.
5024          */
5025         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5026              ((u64) tp->rx_std_mapping >> 32));
5027         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5028              ((u64) tp->rx_std_mapping & 0xffffffff));
5029         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5030              NIC_SRAM_RX_BUFFER_DESC);
5031
5032         /* Don't even try to program the JUMBO/MINI buffer descriptor
5033          * configs on the 5705 and 5750.
5034          */
5035         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5037                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5038                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5039         } else {
5040                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5041                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5042
5043                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5044                      BDINFO_FLAGS_DISABLED);
5045
5046                 /* Setup replenish threshold. */
5047                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5048
5049                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5050                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5051                              ((u64) tp->rx_jumbo_mapping >> 32));
5052                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5053                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5054                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5055                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5056                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5057                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5058                 } else {
5059                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5060                              BDINFO_FLAGS_DISABLED);
5061                 }
5062
5063         }
5064
5065         /* There is only one send ring on 5705/5750, no need to explicitly
5066          * disable the others.
5067          */
5068         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5069             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5070                 /* Clear out send RCB ring in SRAM. */
5071                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5072                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5073                                       BDINFO_FLAGS_DISABLED);
5074         }
5075
5076         tp->tx_prod = 0;
5077         tp->tx_cons = 0;
5078         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5079         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5080
5081         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5082                        tp->tx_desc_mapping,
5083                        (TG3_TX_RING_SIZE <<
5084                         BDINFO_FLAGS_MAXLEN_SHIFT),
5085                        NIC_SRAM_TX_BUFFER_DESC);
5086
5087         /* There is only one receive return ring on 5705/5750, no need
5088          * to explicitly disable the others.
5089          */
5090         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5091             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5092                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5093                      i += TG3_BDINFO_SIZE) {
5094                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5095                                       BDINFO_FLAGS_DISABLED);
5096                 }
5097         }
5098
5099         tp->rx_rcb_ptr = 0;
5100         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5101
5102         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5103                        tp->rx_rcb_mapping,
5104                        (TG3_RX_RCB_RING_SIZE(tp) <<
5105                         BDINFO_FLAGS_MAXLEN_SHIFT),
5106                        0);
5107
5108         tp->rx_std_ptr = tp->rx_pending;
5109         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5110                      tp->rx_std_ptr);
5111
5112         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5113                                                 tp->rx_jumbo_pending : 0;
5114         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5115                      tp->rx_jumbo_ptr);
5116
5117         /* Initialize MAC address and backoff seed. */
5118         __tg3_set_mac_addr(tp);
5119
5120         /* MTU + ethernet header + FCS + optional VLAN tag */
5121         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5122
5123         /* The slot time is changed by tg3_setup_phy if we
5124          * run at gigabit with half duplex.
5125          */
5126         tw32(MAC_TX_LENGTHS,
5127              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5128              (6 << TX_LENGTHS_IPG_SHIFT) |
5129              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5130
5131         /* Receive rules. */
5132         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5133         tw32(RCVLPC_CONFIG, 0x0181);
5134
5135         /* Calculate the RDMAC_MODE setting early; we need it to determine
5136          * the RCVLPC_STATS_ENABLE mask.
5137          */
5138         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5139                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5140                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5141                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5142                       RDMAC_MODE_LNGREAD_ENAB);
5143         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5144                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5145         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5146              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5147             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5148                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5149                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5150                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5151                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5152                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5153                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5154                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5155                 }
5156         }
5157
5158 #if TG3_TSO_SUPPORT != 0
5159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5160                 rdmac_mode |= (1 << 27);
5161 #endif
5162
5163         /* Receive/send statistics. */
5164         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5165             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5166                 val = tr32(RCVLPC_STATS_ENABLE);
5167                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5168                 tw32(RCVLPC_STATS_ENABLE, val);
5169         } else {
5170                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5171         }
5172         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5173         tw32(SNDDATAI_STATSENAB, 0xffffff);
5174         tw32(SNDDATAI_STATSCTRL,
5175              (SNDDATAI_SCTRL_ENABLE |
5176               SNDDATAI_SCTRL_FASTUPD));
5177
5178         /* Setup host coalescing engine. */
5179         tw32(HOSTCC_MODE, 0);
5180         for (i = 0; i < 2000; i++) {
5181                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5182                         break;
5183                 udelay(10);
5184         }
5185
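             /* With RXCOL_TICKS of zero and one-frame thresholds, receive
              * interrupts are effectively generated per frame; transmit
              * keeps the LOW_* coalescing defaults.
              */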
5186         tw32(HOSTCC_RXCOL_TICKS, 0);
5187         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5188         tw32(HOSTCC_RXMAX_FRAMES, 1);
5189         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5190         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5191             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5192                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5193                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5194         }
5195         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5196         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5197
5198         /* set status block DMA address */
5199         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5200              ((u64) tp->status_mapping >> 32));
5201         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5202              ((u64) tp->status_mapping & 0xffffffff));
5203
5204         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5205             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5206                 /* Status/statistics block address.  See tg3_timer,
5207                  * the tg3_periodic_fetch_stats call there, and
5208                  * tg3_get_stats to see how this works for 5705/5750 chips.
5209                  */
5210                 tw32(HOSTCC_STAT_COAL_TICKS,
5211                      DEFAULT_STAT_COAL_TICKS);
5212                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5213                      ((u64) tp->stats_mapping >> 32));
5214                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5215                      ((u64) tp->stats_mapping & 0xffffffff));
5216                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5217                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5218         }
5219
5220         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5221
5222         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5223         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5224         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5225             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5226                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5227
5228         /* Clear statistics/status block in chip, and status block in ram. */
5229         for (i = NIC_SRAM_STATS_BLK;
5230              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5231              i += sizeof(u32)) {
5232                 tg3_write_mem(tp, i, 0);
5233                 udelay(40);
5234         }
5235         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5236
5237         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5238                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5239         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5240         udelay(40);
5241
5242         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5244                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5245                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5246         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5247         udelay(100);
5248
5249         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5250         tr32(MAILBOX_INTERRUPT_0);
5251
5252         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5253             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5254                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5255                 udelay(40);
5256         }
5257
5258         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5259                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5260                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5261                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5262                WDMAC_MODE_LNGREAD_ENAB);
5263
5264         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5265              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5266             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5267                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5268                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5269                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5270                         /* nothing */
5271                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5272                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5273                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5274                         val |= WDMAC_MODE_RX_ACCEL;
5275                 }
5276         }
5277
5278         tw32_f(WDMAC_MODE, val);
5279         udelay(40);
5280
5281         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5282                 val = tr32(TG3PCI_X_CAPS);
5283                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5284                         val &= ~PCIX_CAPS_BURST_MASK;
5285                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5286                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5287                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5288                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5289                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5290                                 val |= (tp->split_mode_max_reqs <<
5291                                         PCIX_CAPS_SPLIT_SHIFT);
5292                 }
5293                 tw32(TG3PCI_X_CAPS, val);
5294         }
5295
5296         tw32_f(RDMAC_MODE, rdmac_mode);
5297         udelay(40);
5298
5299         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5300         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5301             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5302                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5303         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5304         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5305         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5306         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5307         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5308 #if TG3_TSO_SUPPORT != 0
5309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5310                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5311 #endif
5312         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5313         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5314
5315         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5316                 err = tg3_load_5701_a0_firmware_fix(tp);
5317                 if (err)
5318                         return err;
5319         }
5320
5321 #if TG3_TSO_SUPPORT != 0
5322         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5323                 err = tg3_load_tso_firmware(tp);
5324                 if (err)
5325                         return err;
5326         }
5327 #endif
5328
5329         tp->tx_mode = TX_MODE_ENABLE;
5330         tw32_f(MAC_TX_MODE, tp->tx_mode);
5331         udelay(100);
5332
5333         tp->rx_mode = RX_MODE_ENABLE;
5334         tw32_f(MAC_RX_MODE, tp->rx_mode);
5335         udelay(10);
5336
5337         if (tp->link_config.phy_is_low_power) {
5338                 tp->link_config.phy_is_low_power = 0;
5339                 tp->link_config.speed = tp->link_config.orig_speed;
5340                 tp->link_config.duplex = tp->link_config.orig_duplex;
5341                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5342         }
5343
5344         tp->mi_mode = MAC_MI_MODE_BASE;
5345         tw32_f(MAC_MI_MODE, tp->mi_mode);
5346         udelay(80);
5347
5348         tw32(MAC_LED_CTRL, tp->led_ctrl);
5349
5350         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5351         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5352                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5353                 udelay(10);
5354         }
5355         tw32_f(MAC_RX_MODE, tp->rx_mode);
5356         udelay(10);
5357
5358         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5359                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5360                         /* Set drive transmission level to 1.2V  */
5361                         val = tr32(MAC_SERDES_CFG);
5362                         val &= 0xfffff000;
5363                         val |= 0x880;
5364                         tw32(MAC_SERDES_CFG, val);
5365                 }
5366                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5367                         tw32(MAC_SERDES_CFG, 0x616000);
5368         }
5369
5370         /* Prevent chip from dropping frames when flow control
5371          * is enabled.
5372          */
5373         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5374
5375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5376             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5377                 /* Use hardware link auto-negotiation */
5378                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5379         }
5380
5381         err = tg3_setup_phy(tp, 1);
5382         if (err)
5383                 return err;
5384
5385         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5386                 u32 tmp;
5387
5388                 /* Clear CRC stats. */
5389                 tg3_readphy(tp, 0x1e, &tmp);
5390                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5391                 tg3_readphy(tp, 0x14, &tmp);
5392         }
5393
5394         __tg3_set_rx_mode(tp->dev);
5395
5396         /* Initialize receive rules. */
5397         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5398         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5399         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5400         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5401
5402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5404                 limit = 8;
5405         else
5406                 limit = 16;
5407         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5408                 limit -= 4;
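         /* Fall through from 'limit' downward, disabling every receive rule
          * slot above the ones programmed here; when ASF is enabled the top
          * four slots are skipped, presumably because the firmware owns them.
          */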
5409         switch (limit) {
5410         case 16:
5411                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5412         case 15:
5413                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5414         case 14:
5415                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5416         case 13:
5417                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5418         case 12:
5419                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5420         case 11:
5421                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5422         case 10:
5423                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5424         case 9:
5425                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5426         case 8:
5427                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5428         case 7:
5429                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5430         case 6:
5431                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5432         case 5:
5433                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5434         case 4:
5435                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5436         case 3:
5437                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5438         case 2:
5439         case 1:
5440
5441         default:
5442                 break;
5443         }
5444
5445         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5446
5447         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5448                 tg3_enable_ints(tp);
5449
5450         return 0;
5451 }
5452
5453 /* Called at device open time to get the chip ready for
5454  * packet processing.  Invoked with tp->lock held.
5455  */
5456 static int tg3_init_hw(struct tg3 *tp)
5457 {
5458         int err;
5459
5460         /* Force the chip into D0. */
5461         err = tg3_set_power_state(tp, 0);
5462         if (err)
5463                 goto out;
5464
5465         tg3_switch_clocks(tp);
5466
5467         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5468
5469         err = tg3_reset_hw(tp);
5470
5471 out:
5472         return err;
5473 }
5474
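/* Accumulate a 32-bit hardware counter into a 64-bit software counter.
 * The MAC statistics registers are read as 32-bit values, so the unsigned
 * wrap of ->low after the add is used as the carry into ->high.
 */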
5475 #define TG3_STAT_ADD32(PSTAT, REG) \
5476 do {    u32 __val = tr32(REG); \
5477         (PSTAT)->low += __val; \
5478         if ((PSTAT)->low < __val) \
5479                 (PSTAT)->high += 1; \
5480 } while (0)
5481
5482 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5483 {
5484         struct tg3_hw_stats *sp = tp->hw_stats;
5485
5486         if (!netif_carrier_ok(tp->dev))
5487                 return;
5488
5489         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5490         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5491         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5492         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5493         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5494         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5495         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5496         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5497         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5498         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5499         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5500         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5501         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5502
5503         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5504         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5505         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5506         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5507         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5508         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5509         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5510         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5511         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5512         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5513         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5514         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5515         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5516         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5517 }
5518
5519 static void tg3_timer(unsigned long __opaque)
5520 {
5521         struct tg3 *tp = (struct tg3 *) __opaque;
5522         unsigned long flags;
5523
5524         spin_lock_irqsave(&tp->lock, flags);
5525         spin_lock(&tp->tx_lock);
5526
5527         /* All of this garbage is needed because, when using non-tagged
5528          * IRQ status, the mailbox/status_block protocol the chip
5529          * uses with the CPU is race prone.
5530          */
5531         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5532                 tw32(GRC_LOCAL_CTRL,
5533                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5534         } else {
5535                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5536                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5537         }
5538
5539         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5540                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5541                 spin_unlock(&tp->tx_lock);
5542                 spin_unlock_irqrestore(&tp->lock, flags);
5543                 schedule_work(&tp->reset_task);
5544                 return;
5545         }
5546
5547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5548             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5549                 tg3_periodic_fetch_stats(tp);
5550
5551         /* This part only runs once per second. */
5552         if (!--tp->timer_counter) {
5553                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5554                         u32 mac_stat;
5555                         int phy_event;
5556
5557                         mac_stat = tr32(MAC_STATUS);
5558
5559                         phy_event = 0;
5560                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5561                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5562                                         phy_event = 1;
5563                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5564                                 phy_event = 1;
5565
5566                         if (phy_event)
5567                                 tg3_setup_phy(tp, 0);
5568                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5569                         u32 mac_stat = tr32(MAC_STATUS);
5570                         int need_setup = 0;
5571
5572                         if (netif_carrier_ok(tp->dev) &&
5573                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5574                                 need_setup = 1;
5575                         }
5576                         if (!netif_carrier_ok(tp->dev) &&
5577                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5578                                          MAC_STATUS_SIGNAL_DET))) {
5579                                 need_setup = 1;
5580                         }
5581                         if (need_setup) {
5582                                 tw32_f(MAC_MODE,
5583                                      (tp->mac_mode &
5584                                       ~MAC_MODE_PORT_MODE_MASK));
5585                                 udelay(40);
5586                                 tw32_f(MAC_MODE, tp->mac_mode);
5587                                 udelay(40);
5588                                 tg3_setup_phy(tp, 0);
5589                         }
5590                 }
5591
5592                 tp->timer_counter = tp->timer_multiplier;
5593         }
5594
5595         /* Heartbeat is only sent once every 120 seconds.  */
5596         if (!--tp->asf_counter) {
5597                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5598                         u32 val;
5599
5600                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5601                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5602                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5603                         val = tr32(GRC_RX_CPU_EVENT);
5604                         val |= (1 << 14);
5605                         tw32(GRC_RX_CPU_EVENT, val);
5606                 }
5607                 tp->asf_counter = tp->asf_multiplier;
5608         }
5609
5610         spin_unlock(&tp->tx_lock);
5611         spin_unlock_irqrestore(&tp->lock, flags);
5612
5613         tp->timer.expires = jiffies + tp->timer_offset;
5614         add_timer(&tp->timer);
5615 }
5616
5617 static int tg3_open(struct net_device *dev)
5618 {
5619         struct tg3 *tp = netdev_priv(dev);
5620         int err;
5621
5622         spin_lock_irq(&tp->lock);
5623         spin_lock(&tp->tx_lock);
5624
5625         tg3_disable_ints(tp);
5626         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5627
5628         spin_unlock(&tp->tx_lock);
5629         spin_unlock_irq(&tp->lock);
5630
5631         /* The placement of this call is tied
5632          * to the setup and use of Host TX descriptors.
5633          */
5634         err = tg3_alloc_consistent(tp);
5635         if (err)
5636                 return err;
5637
5638         err = request_irq(dev->irq, tg3_interrupt,
5639                           SA_SHIRQ, dev->name, dev);
5640
5641         if (err) {
5642                 tg3_free_consistent(tp);
5643                 return err;
5644         }
5645
5646         spin_lock_irq(&tp->lock);
5647         spin_lock(&tp->tx_lock);
5648
5649         err = tg3_init_hw(tp);
5650         if (err) {
5651                 tg3_halt(tp);
5652                 tg3_free_rings(tp);
5653         } else {
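                     /* The timer fires every HZ/10 jiffies.  tg3_timer's
                      * once-per-second work runs every timer_multiplier (10)
                      * ticks, and the ASF heartbeat every 10 * 120 ticks,
                      * i.e. roughly once every 120 seconds.
                      */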
5654                 tp->timer_offset = HZ / 10;
5655                 tp->timer_counter = tp->timer_multiplier = 10;
5656                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5657
5658                 init_timer(&tp->timer);
5659                 tp->timer.expires = jiffies + tp->timer_offset;
5660                 tp->timer.data = (unsigned long) tp;
5661                 tp->timer.function = tg3_timer;
5662                 add_timer(&tp->timer);
5663
5664                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5665         }
5666
5667         spin_unlock(&tp->tx_lock);
5668         spin_unlock_irq(&tp->lock);
5669
5670         if (err) {
5671                 free_irq(dev->irq, dev);
5672                 tg3_free_consistent(tp);
5673                 return err;
5674         }
5675
5676         spin_lock_irq(&tp->lock);
5677         spin_lock(&tp->tx_lock);
5678
5679         tg3_enable_ints(tp);
5680
5681         spin_unlock(&tp->tx_lock);
5682         spin_unlock_irq(&tp->lock);
5683
5684         netif_start_queue(dev);
5685
5686         return 0;
5687 }
5688
5689 #if 0
5690 /*static*/ void tg3_dump_state(struct tg3 *tp)
5691 {
5692         u32 val32, val32_2, val32_3, val32_4, val32_5;
5693         u16 val16;
5694         int i;
5695
5696         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5697         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5698         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5699                val16, val32);
5700
5701         /* MAC block */
5702         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5703                tr32(MAC_MODE), tr32(MAC_STATUS));
5704         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5705                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5706         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5707                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5708         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5709                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5710
5711         /* Send data initiator control block */
5712         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5713                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5714         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5715                tr32(SNDDATAI_STATSCTRL));
5716
5717         /* Send data completion control block */
5718         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5719
5720         /* Send BD ring selector block */
5721         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5722                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5723
5724         /* Send BD initiator control block */
5725         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5726                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5727
5728         /* Send BD completion control block */
5729         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5730
5731         /* Receive list placement control block */
5732         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5733                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5734         printk("       RCVLPC_STATSCTRL[%08x]\n",
5735                tr32(RCVLPC_STATSCTRL));
5736
5737         /* Receive data and receive BD initiator control block */
5738         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5739                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5740
5741         /* Receive data completion control block */
5742         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5743                tr32(RCVDCC_MODE));
5744
5745         /* Receive BD initiator control block */
5746         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5747                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5748
5749         /* Receive BD completion control block */
5750         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5751                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5752
5753         /* Receive list selector control block */
5754         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5755                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5756
5757         /* Mbuf cluster free block */
5758         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5759                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5760
5761         /* Host coalescing control block */
5762         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5763                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5764         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5765                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5766                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5767         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5768                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5769                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5770         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5771                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5772         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5773                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5774
5775         /* Memory arbiter control block */
5776         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5777                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5778
5779         /* Buffer manager control block */
5780         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5781                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5782         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5783                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5784         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5785                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5786                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5787                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5788
5789         /* Read DMA control block */
5790         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5791                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5792
5793         /* Write DMA control block */
5794         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5795                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5796
5797         /* DMA completion block */
5798         printk("DEBUG: DMAC_MODE[%08x]\n",
5799                tr32(DMAC_MODE));
5800
5801         /* GRC block */
5802         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5803                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5804         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5805                tr32(GRC_LOCAL_CTRL));
5806
5807         /* TG3_BDINFOs */
5808         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5809                tr32(RCVDBDI_JUMBO_BD + 0x0),
5810                tr32(RCVDBDI_JUMBO_BD + 0x4),
5811                tr32(RCVDBDI_JUMBO_BD + 0x8),
5812                tr32(RCVDBDI_JUMBO_BD + 0xc));
5813         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5814                tr32(RCVDBDI_STD_BD + 0x0),
5815                tr32(RCVDBDI_STD_BD + 0x4),
5816                tr32(RCVDBDI_STD_BD + 0x8),
5817                tr32(RCVDBDI_STD_BD + 0xc));
5818         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5819                tr32(RCVDBDI_MINI_BD + 0x0),
5820                tr32(RCVDBDI_MINI_BD + 0x4),
5821                tr32(RCVDBDI_MINI_BD + 0x8),
5822                tr32(RCVDBDI_MINI_BD + 0xc));
5823
5824         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5825         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5826         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5827         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5828         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5829                val32, val32_2, val32_3, val32_4);
5830
5831         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5832         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5833         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5834         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5835         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5836                val32, val32_2, val32_3, val32_4);
5837
5838         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5839         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5840         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5841         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5842         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5843         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5844                val32, val32_2, val32_3, val32_4, val32_5);
5845
5846         /* SW status block */
5847         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5848                tp->hw_status->status,
5849                tp->hw_status->status_tag,
5850                tp->hw_status->rx_jumbo_consumer,
5851                tp->hw_status->rx_consumer,
5852                tp->hw_status->rx_mini_consumer,
5853                tp->hw_status->idx[0].rx_producer,
5854                tp->hw_status->idx[0].tx_consumer);
5855
5856         /* SW statistics block */
5857         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5858                ((u32 *)tp->hw_stats)[0],
5859                ((u32 *)tp->hw_stats)[1],
5860                ((u32 *)tp->hw_stats)[2],
5861                ((u32 *)tp->hw_stats)[3]);
5862
5863         /* Mailboxes */
5864         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5865                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5866                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5867                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5868                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5869
5870         /* NIC side send descriptors. */
5871         for (i = 0; i < 6; i++) {
5872                 unsigned long txd;
5873
5874                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5875                         + (i * sizeof(struct tg3_tx_buffer_desc));
5876                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5877                        i,
5878                        readl(txd + 0x0), readl(txd + 0x4),
5879                        readl(txd + 0x8), readl(txd + 0xc));
5880         }
5881
5882         /* NIC side RX descriptors. */
5883         for (i = 0; i < 6; i++) {
5884                 unsigned long rxd;
5885
5886                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5887                         + (i * sizeof(struct tg3_rx_buffer_desc));
5888                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5889                        i,
5890                        readl(rxd + 0x0), readl(rxd + 0x4),
5891                        readl(rxd + 0x8), readl(rxd + 0xc));
5892                 rxd += (4 * sizeof(u32));
5893                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5894                        i,
5895                        readl(rxd + 0x0), readl(rxd + 0x4),
5896                        readl(rxd + 0x8), readl(rxd + 0xc));
5897         }
5898
5899         for (i = 0; i < 6; i++) {
5900                 unsigned long rxd;
5901
5902                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5903                         + (i * sizeof(struct tg3_rx_buffer_desc));
5904                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5905                        i,
5906                        readl(rxd + 0x0), readl(rxd + 0x4),
5907                        readl(rxd + 0x8), readl(rxd + 0xc));
5908                 rxd += (4 * sizeof(u32));
5909                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5910                        i,
5911                        readl(rxd + 0x0), readl(rxd + 0x4),
5912                        readl(rxd + 0x8), readl(rxd + 0xc));
5913         }
5914 }
5915 #endif
5916
5917 static struct net_device_stats *tg3_get_stats(struct net_device *);
5918 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5919
5920 static int tg3_close(struct net_device *dev)
5921 {
5922         struct tg3 *tp = netdev_priv(dev);
5923
5924         netif_stop_queue(dev);
5925
5926         del_timer_sync(&tp->timer);
5927
5928         spin_lock_irq(&tp->lock);
5929         spin_lock(&tp->tx_lock);
5930 #if 0
5931         tg3_dump_state(tp);
5932 #endif
5933
5934         tg3_disable_ints(tp);
5935
5936         tg3_halt(tp);
5937         tg3_free_rings(tp);
5938         tp->tg3_flags &=
5939                 ~(TG3_FLAG_INIT_COMPLETE |
5940                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5941         netif_carrier_off(tp->dev);
5942
5943         spin_unlock(&tp->tx_lock);
5944         spin_unlock_irq(&tp->lock);
5945
5946         free_irq(dev->irq, dev);
5947
5948         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5949                sizeof(tp->net_stats_prev));
5950         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5951                sizeof(tp->estats_prev));
5952
5953         tg3_free_consistent(tp);
5954
5955         return 0;
5956 }
5957
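/* Return a 64-bit statistics counter as an unsigned long.  On 32-bit
 * hosts unsigned long is 32 bits, so only the low word is reported.
 */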
5958 static inline unsigned long get_stat64(tg3_stat64_t *val)
5959 {
5960         unsigned long ret;
5961
5962 #if (BITS_PER_LONG == 32)
5963         ret = val->low;
5964 #else
5965         ret = ((u64)val->high << 32) | ((u64)val->low);
5966 #endif
5967         return ret;
5968 }
5969
5970 static unsigned long calc_crc_errors(struct tg3 *tp)
5971 {
5972         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5973
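             /* On 5700/5701 copper parts the FCS/CRC error count is kept by
              * the PHY: setting bit 15 of register 0x1e exposes the counter
              * at register 0x14 (the same sequence tg3_reset_hw uses to
              * clear it), and each read here accumulates into phy_crc_errors.
              */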
5974         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
5975             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5976              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5977                 unsigned long flags;
5978                 u32 val;
5979
5980                 spin_lock_irqsave(&tp->lock, flags);
5981                 tg3_readphy(tp, 0x1e, &val);
5982                 tg3_writephy(tp, 0x1e, val | 0x8000);
5983                 tg3_readphy(tp, 0x14, &val);
5984                 spin_unlock_irqrestore(&tp->lock, flags);
5985
5986                 tp->phy_crc_errors += val;
5987
5988                 return tp->phy_crc_errors;
5989         }
5990
5991         return get_stat64(&hw_stats->rx_fcs_errors);
5992 }
5993
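/* Each ethtool stat is the live hardware counter added to the snapshot
 * saved in estats_prev at the last tg3_close(), so totals survive an
 * ifdown/ifup cycle.
 */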
5994 #define ESTAT_ADD(member) \
5995         estats->member =        old_estats->member + \
5996                                 get_stat64(&hw_stats->member)
5997
5998 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5999 {
6000         struct tg3_ethtool_stats *estats = &tp->estats;
6001         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6002         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6003
6004         if (!hw_stats)
6005                 return old_estats;
6006
6007         ESTAT_ADD(rx_octets);
6008         ESTAT_ADD(rx_fragments);
6009         ESTAT_ADD(rx_ucast_packets);
6010         ESTAT_ADD(rx_mcast_packets);
6011         ESTAT_ADD(rx_bcast_packets);
6012         ESTAT_ADD(rx_fcs_errors);
6013         ESTAT_ADD(rx_align_errors);
6014         ESTAT_ADD(rx_xon_pause_rcvd);
6015         ESTAT_ADD(rx_xoff_pause_rcvd);
6016         ESTAT_ADD(rx_mac_ctrl_rcvd);
6017         ESTAT_ADD(rx_xoff_entered);
6018         ESTAT_ADD(rx_frame_too_long_errors);
6019         ESTAT_ADD(rx_jabbers);
6020         ESTAT_ADD(rx_undersize_packets);
6021         ESTAT_ADD(rx_in_length_errors);
6022         ESTAT_ADD(rx_out_length_errors);
6023         ESTAT_ADD(rx_64_or_less_octet_packets);
6024         ESTAT_ADD(rx_65_to_127_octet_packets);
6025         ESTAT_ADD(rx_128_to_255_octet_packets);
6026         ESTAT_ADD(rx_256_to_511_octet_packets);
6027         ESTAT_ADD(rx_512_to_1023_octet_packets);
6028         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6029         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6030         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6031         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6032         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6033
6034         ESTAT_ADD(tx_octets);
6035         ESTAT_ADD(tx_collisions);
6036         ESTAT_ADD(tx_xon_sent);
6037         ESTAT_ADD(tx_xoff_sent);
6038         ESTAT_ADD(tx_flow_control);
6039         ESTAT_ADD(tx_mac_errors);
6040         ESTAT_ADD(tx_single_collisions);
6041         ESTAT_ADD(tx_mult_collisions);
6042         ESTAT_ADD(tx_deferred);
6043         ESTAT_ADD(tx_excessive_collisions);
6044         ESTAT_ADD(tx_late_collisions);
6045         ESTAT_ADD(tx_collide_2times);
6046         ESTAT_ADD(tx_collide_3times);
6047         ESTAT_ADD(tx_collide_4times);
6048         ESTAT_ADD(tx_collide_5times);
6049         ESTAT_ADD(tx_collide_6times);
6050         ESTAT_ADD(tx_collide_7times);
6051         ESTAT_ADD(tx_collide_8times);
6052         ESTAT_ADD(tx_collide_9times);
6053         ESTAT_ADD(tx_collide_10times);
6054         ESTAT_ADD(tx_collide_11times);
6055         ESTAT_ADD(tx_collide_12times);
6056         ESTAT_ADD(tx_collide_13times);
6057         ESTAT_ADD(tx_collide_14times);
6058         ESTAT_ADD(tx_collide_15times);
6059         ESTAT_ADD(tx_ucast_packets);
6060         ESTAT_ADD(tx_mcast_packets);
6061         ESTAT_ADD(tx_bcast_packets);
6062         ESTAT_ADD(tx_carrier_sense_errors);
6063         ESTAT_ADD(tx_discards);
6064         ESTAT_ADD(tx_errors);
6065
6066         ESTAT_ADD(dma_writeq_full);
6067         ESTAT_ADD(dma_write_prioq_full);
6068         ESTAT_ADD(rxbds_empty);
6069         ESTAT_ADD(rx_discards);
6070         ESTAT_ADD(rx_errors);
6071         ESTAT_ADD(rx_threshold_hit);
6072
6073         ESTAT_ADD(dma_readq_full);
6074         ESTAT_ADD(dma_read_prioq_full);
6075         ESTAT_ADD(tx_comp_queue_full);
6076
6077         ESTAT_ADD(ring_set_send_prod_index);
6078         ESTAT_ADD(ring_status_update);
6079         ESTAT_ADD(nic_irqs);
6080         ESTAT_ADD(nic_avoided_irqs);
6081         ESTAT_ADD(nic_tx_threshold_hit);
6082
6083         return estats;
6084 }
6085
6086 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6087 {
6088         struct tg3 *tp = netdev_priv(dev);
6089         struct net_device_stats *stats = &tp->net_stats;
6090         struct net_device_stats *old_stats = &tp->net_stats_prev;
6091         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6092
6093         if (!hw_stats)
6094                 return old_stats;
6095
6096         stats->rx_packets = old_stats->rx_packets +
6097                 get_stat64(&hw_stats->rx_ucast_packets) +
6098                 get_stat64(&hw_stats->rx_mcast_packets) +
6099                 get_stat64(&hw_stats->rx_bcast_packets);
6100                 
6101         stats->tx_packets = old_stats->tx_packets +
6102                 get_stat64(&hw_stats->tx_ucast_packets) +
6103                 get_stat64(&hw_stats->tx_mcast_packets) +
6104                 get_stat64(&hw_stats->tx_bcast_packets);
6105
6106         stats->rx_bytes = old_stats->rx_bytes +
6107                 get_stat64(&hw_stats->rx_octets);
6108         stats->tx_bytes = old_stats->tx_bytes +
6109                 get_stat64(&hw_stats->tx_octets);
6110
6111         stats->rx_errors = old_stats->rx_errors +
6112                 get_stat64(&hw_stats->rx_errors) +
6113                 get_stat64(&hw_stats->rx_discards);
6114         stats->tx_errors = old_stats->tx_errors +
6115                 get_stat64(&hw_stats->tx_errors) +
6116                 get_stat64(&hw_stats->tx_mac_errors) +
6117                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6118                 get_stat64(&hw_stats->tx_discards);
6119
6120         stats->multicast = old_stats->multicast +
6121                 get_stat64(&hw_stats->rx_mcast_packets);
6122         stats->collisions = old_stats->collisions +
6123                 get_stat64(&hw_stats->tx_collisions);
6124
6125         stats->rx_length_errors = old_stats->rx_length_errors +
6126                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6127                 get_stat64(&hw_stats->rx_undersize_packets);
6128
6129         stats->rx_over_errors = old_stats->rx_over_errors +
6130                 get_stat64(&hw_stats->rxbds_empty);
6131         stats->rx_frame_errors = old_stats->rx_frame_errors +
6132                 get_stat64(&hw_stats->rx_align_errors);
6133         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6134                 get_stat64(&hw_stats->tx_discards);
6135         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6136                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6137
6138         stats->rx_crc_errors = old_stats->rx_crc_errors +
6139                 calc_crc_errors(tp);
6140
6141         return stats;
6142 }
6143
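/* Bit-at-a-time CRC-32: reflected polynomial 0xedb88320, initial value
 * 0xffffffff, result inverted at the end.  Used below to hash multicast
 * addresses into the 128-bit MAC hash filter.
 */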
6144 static inline u32 calc_crc(unsigned char *buf, int len)
6145 {
6146         u32 reg;
6147         u32 tmp;
6148         int j, k;
6149
6150         reg = 0xffffffff;
6151
6152         for (j = 0; j < len; j++) {
6153                 reg ^= buf[j];
6154
6155                 for (k = 0; k < 8; k++) {
6156                         tmp = reg & 0x01;
6157
6158                         reg >>= 1;
6159
6160                         if (tmp) {
6161                                 reg ^= 0xedb88320;
6162                         }
6163                 }
6164         }
6165
6166         return ~reg;
6167 }
6168
6169 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6170 {
6171         /* accept or reject all multicast frames */
6172         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6173         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6174         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6175         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6176 }
6177
6178 static void __tg3_set_rx_mode(struct net_device *dev)
6179 {
6180         struct tg3 *tp = netdev_priv(dev);
6181         u32 rx_mode;
6182
6183         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6184                                   RX_MODE_KEEP_VLAN_TAG);
6185
6186         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6187          * flag clear.
6188          */
6189 #if TG3_VLAN_TAG_USED
6190         if (!tp->vlgrp &&
6191             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6192                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6193 #else
6194         /* By definition, VLAN is always disabled in this
6195          * case.
6196          */
6197         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6198                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6199 #endif
6200
6201         if (dev->flags & IFF_PROMISC) {
6202                 /* Promiscuous mode. */
6203                 rx_mode |= RX_MODE_PROMISC;
6204         } else if (dev->flags & IFF_ALLMULTI) {
6205                 /* Accept all multicast. */
6206                 tg3_set_multi(tp, 1);
6207         } else if (dev->mc_count < 1) {
6208                 /* Reject all multicast. */
6209                 tg3_set_multi(tp, 0);
6210         } else {
6211                 /* Accept one or more multicast(s). */
6212                 struct dev_mc_list *mclist;
6213                 unsigned int i;
6214                 u32 mc_filter[4] = { 0, };
6215                 u32 regidx;
6216                 u32 bit;
6217                 u32 crc;
6218
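                     /* Each address hashes to one of 128 filter bits: the
                      * low 7 bits of the inverted CRC select the bit, bits
                      * 6:5 of that value pick one of the four hash registers
                      * and bits 4:0 the position within it.
                      */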
6219                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6220                      i++, mclist = mclist->next) {
6221
6222                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6223                         bit = ~crc & 0x7f;
6224                         regidx = (bit & 0x60) >> 5;
6225                         bit &= 0x1f;
6226                         mc_filter[regidx] |= (1 << bit);
6227                 }
6228
6229                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6230                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6231                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6232                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6233         }
6234
6235         if (rx_mode != tp->rx_mode) {
6236                 tp->rx_mode = rx_mode;
6237                 tw32_f(MAC_RX_MODE, rx_mode);
6238                 udelay(10);
6239         }
6240 }
6241
6242 static void tg3_set_rx_mode(struct net_device *dev)
6243 {
6244         struct tg3 *tp = netdev_priv(dev);
6245
6246         spin_lock_irq(&tp->lock);
6247         spin_lock(&tp->tx_lock);
6248         __tg3_set_rx_mode(dev);
6249         spin_unlock(&tp->tx_lock);
6250         spin_unlock_irq(&tp->lock);
6251 }
6252
6253 #define TG3_REGDUMP_LEN         (32 * 1024)
6254
6255 static int tg3_get_regs_len(struct net_device *dev)
6256 {
6257         return TG3_REGDUMP_LEN;
6258 }
6259
6260 static void tg3_get_regs(struct net_device *dev,
6261                 struct ethtool_regs *regs, void *_p)
6262 {
6263         u32 *p = _p;
6264         struct tg3 *tp = netdev_priv(dev);
6265         u8 *orig_p = _p;
6266         int i;
6267
6268         regs->version = 0;
6269
6270         memset(p, 0, TG3_REGDUMP_LEN);
6271
6272         spin_lock_irq(&tp->lock);
6273         spin_lock(&tp->tx_lock);
6274
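/* Each helper below copies a block of registers into the dump buffer at
 * the same offset the block occupies in register space; ranges that are
 * never read stay zero from the memset above.
 */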
6275 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6276 #define GET_REG32_LOOP(base,len)                \
6277 do {    p = (u32 *)(orig_p + (base));           \
6278         for (i = 0; i < len; i += 4)            \
6279                 __GET_REG32((base) + i);        \
6280 } while (0)
6281 #define GET_REG32_1(reg)                        \
6282 do {    p = (u32 *)(orig_p + (reg));            \
6283         __GET_REG32((reg));                     \
6284 } while (0)
6285
6286         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6287         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6288         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6289         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6290         GET_REG32_1(SNDDATAC_MODE);
6291         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6292         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6293         GET_REG32_1(SNDBDC_MODE);
6294         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6295         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6296         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6297         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6298         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6299         GET_REG32_1(RCVDCC_MODE);
6300         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6301         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6302         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6303         GET_REG32_1(MBFREE_MODE);
6304         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6305         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6306         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6307         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6308         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6309         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6310         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6311         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6312         GET_REG32_LOOP(FTQ_RESET, 0x120);
6313         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6314         GET_REG32_1(DMAC_MODE);
6315         GET_REG32_LOOP(GRC_MODE, 0x4c);
6316         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6317                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6318
6319 #undef __GET_REG32
6320 #undef GET_REG32_LOOP
6321 #undef GET_REG32_1
6322
6323         spin_unlock(&tp->tx_lock);
6324         spin_unlock_irq(&tp->lock);
6325 }
6326
6327 static int tg3_get_eeprom_len(struct net_device *dev)
6328 {
6329         return EEPROM_CHIP_SIZE;
6330 }
6331
6332 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6333                                                  u32 offset, u32 *val);
6334 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6335 {
6336         struct tg3 *tp = netdev_priv(dev);
6337         int ret;
6338         u8  *pd;
6339         u32 i, offset, len, val, b_offset, b_count;
6340
6341         offset = eeprom->offset;
6342         len = eeprom->len;
6343         eeprom->len = 0;
6344
6345         ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6346         if (ret)
6347                 return ret;
6348         eeprom->magic = swab32(eeprom->magic);
6349
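        /* Editor's illustration (not from the hardware docs): a request with
         * offset=6, len=9 is served as a 2-byte head taken from the word at
         * offset 4, one full 4-byte word read at offset 8, and a 3-byte tail
         * taken from the word at offset 12, matching the three blocks below.
         */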
6350         if (offset & 3) {
6351                 /* adjustments to start on required 4 byte boundary */
6352                 b_offset = offset & 3;
6353                 b_count = 4 - b_offset;
6354                 if (b_count > len) {
6355                         /* i.e. offset=1 len=2 */
6356                         b_count = len;
6357                 }
6358                 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6359                 if (ret)
6360                         return ret;
6361                 memcpy(data, ((char*)&val) + b_offset, b_count);
6362                 len -= b_count;
6363                 offset += b_count;
6364                 eeprom->len += b_count;
6365         }
6366
6367         /* read bytes up to the last 4 byte boundary */
6368         pd = &data[eeprom->len];
6369         for (i = 0; i < (len - (len & 3)); i += 4) {
6370                 ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
6371                                 (u32*)(pd + i));
6372                 if (ret) {
6373                         eeprom->len += i;
6374                         return ret;
6375                 }
6376         }
6377         eeprom->len += i;
6378
6379         if (len & 3) {
6380                 /* read last bytes not ending on 4 byte boundary */
6381                 pd = &data[eeprom->len];
6382                 b_count = len & 3;
6383                 b_offset = offset + len - b_count;
6384                 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6385                 if (ret)
6386                         return ret;
6387                 memcpy(pd, ((char*)&val), b_count);
6388                 eeprom->len += b_count;
6389         }
6390         return 0;
6391 }
6392
6393 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6394 {
6395         struct tg3 *tp = netdev_priv(dev);
6396   
6397         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6398                                         tp->link_config.phy_is_low_power)
6399                 return -EAGAIN;
6400
6401         cmd->supported = (SUPPORTED_Autoneg);
6402
6403         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6404                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6405                                    SUPPORTED_1000baseT_Full);
6406
6407         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6408                 cmd->supported |= (SUPPORTED_100baseT_Half |
6409                                   SUPPORTED_100baseT_Full |
6410                                   SUPPORTED_10baseT_Half |
6411                                   SUPPORTED_10baseT_Full |
6412                                   SUPPORTED_MII);
6413         else
6414                 cmd->supported |= SUPPORTED_FIBRE;
6415   
6416         cmd->advertising = tp->link_config.advertising;
6417         cmd->speed = tp->link_config.active_speed;
6418         cmd->duplex = tp->link_config.active_duplex;
6419         cmd->port = 0;
6420         cmd->phy_address = PHY_ADDR;
6421         cmd->transceiver = 0;
6422         cmd->autoneg = tp->link_config.autoneg;
6423         cmd->maxtxpkt = 0;
6424         cmd->maxrxpkt = 0;
6425         return 0;
6426 }
6427   
6428 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6429 {
6430         struct tg3 *tp = netdev_priv(dev);
6431   
6432         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6433             tp->link_config.phy_is_low_power)
6434                 return -EAGAIN;
6435
6436         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6437                 /* These are the only valid advertisement bits allowed.  */
6438                 if (cmd->autoneg == AUTONEG_ENABLE &&
6439                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6440                                           ADVERTISED_1000baseT_Full |
6441                                           ADVERTISED_Autoneg |
6442                                           ADVERTISED_FIBRE)))
6443                         return -EINVAL;
6444         }
6445
6446         spin_lock_irq(&tp->lock);
6447         spin_lock(&tp->tx_lock);
6448
6449         tp->link_config.autoneg = cmd->autoneg;
6450         if (cmd->autoneg == AUTONEG_ENABLE) {
6451                 tp->link_config.advertising = cmd->advertising;
6452                 tp->link_config.speed = SPEED_INVALID;
6453                 tp->link_config.duplex = DUPLEX_INVALID;
6454         } else {
6455                 tp->link_config.advertising = 0;
6456                 tp->link_config.speed = cmd->speed;
6457                 tp->link_config.duplex = cmd->duplex;
6458         }
6459   
6460         tg3_setup_phy(tp, 1);
6461         spin_unlock(&tp->tx_lock);
6462         spin_unlock_irq(&tp->lock);
6463   
6464         return 0;
6465 }
6466   
6467 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6468 {
6469         struct tg3 *tp = netdev_priv(dev);
6470   
6471         strcpy(info->driver, DRV_MODULE_NAME);
6472         strcpy(info->version, DRV_MODULE_VERSION);
6473         strcpy(info->bus_info, pci_name(tp->pdev));
6474 }
6475   
6476 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6477 {
6478         struct tg3 *tp = netdev_priv(dev);
6479   
6480         wol->supported = WAKE_MAGIC;
6481         wol->wolopts = 0;
6482         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6483                 wol->wolopts = WAKE_MAGIC;
6484         memset(&wol->sopass, 0, sizeof(wol->sopass));
6485 }
6486   
6487 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6488 {
6489         struct tg3 *tp = netdev_priv(dev);
6490   
6491         if (wol->wolopts & ~WAKE_MAGIC)
6492                 return -EINVAL;
6493         if ((wol->wolopts & WAKE_MAGIC) &&
6494             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6495             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6496                 return -EINVAL;
6497   
6498         spin_lock_irq(&tp->lock);
6499         if (wol->wolopts & WAKE_MAGIC)
6500                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6501         else
6502                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6503         spin_unlock_irq(&tp->lock);
6504   
6505         return 0;
6506 }
6507   
6508 static u32 tg3_get_msglevel(struct net_device *dev)
6509 {
6510         struct tg3 *tp = netdev_priv(dev);
6511         return tp->msg_enable;
6512 }
6513   
6514 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6515 {
6516         struct tg3 *tp = netdev_priv(dev);
6517         tp->msg_enable = value;
6518 }
6519   
6520 #if TG3_TSO_SUPPORT != 0
6521 static int tg3_set_tso(struct net_device *dev, u32 value)
6522 {
6523         struct tg3 *tp = netdev_priv(dev);
6524
6525         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6526                 if (value)
6527                         return -EINVAL;
6528                 return 0;
6529         }
6530         return ethtool_op_set_tso(dev, value);
6531 }
6532 #endif
6533   
6534 static int tg3_nway_reset(struct net_device *dev)
6535 {
6536         struct tg3 *tp = netdev_priv(dev);
6537         u32 bmcr;
6538         int r;
6539   
6540         spin_lock_irq(&tp->lock);
6541         tg3_readphy(tp, MII_BMCR, &bmcr);
6542         tg3_readphy(tp, MII_BMCR, &bmcr);
6543         r = -EINVAL;
6544         if (bmcr & BMCR_ANENABLE) {
6545                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6546                 r = 0;
6547         }
6548         spin_unlock_irq(&tp->lock);
6549   
6550         return r;
6551 }
6552   
6553 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6554 {
6555         struct tg3 *tp = netdev_priv(dev);
6556   
6557         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6558         ering->rx_mini_max_pending = 0;
6559         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6560
6561         ering->rx_pending = tp->rx_pending;
6562         ering->rx_mini_pending = 0;
6563         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6564         ering->tx_pending = tp->tx_pending;
6565 }
6566   
6567 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6568 {
6569         struct tg3 *tp = netdev_priv(dev);
6570   
6571         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6572             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6573             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6574                 return -EINVAL;
6575   
6576         tg3_netif_stop(tp);
6577         spin_lock_irq(&tp->lock);
6578         spin_lock(&tp->tx_lock);
6579   
6580         tp->rx_pending = ering->rx_pending;
6581
6582         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6583             tp->rx_pending > 63)
6584                 tp->rx_pending = 63;
6585         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6586         tp->tx_pending = ering->tx_pending;
6587
6588         tg3_halt(tp);
6589         tg3_init_hw(tp);
6590         tg3_netif_start(tp);
6591         spin_unlock(&tp->tx_lock);
6592         spin_unlock_irq(&tp->lock);
6593   
6594         return 0;
6595 }
6596   
6597 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6598 {
6599         struct tg3 *tp = netdev_priv(dev);
6600   
6601         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6602         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6603         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6604 }
6605   
6606 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6607 {
6608         struct tg3 *tp = netdev_priv(dev);
6609   
6610         tg3_netif_stop(tp);
6611         spin_lock_irq(&tp->lock);
6612         spin_lock(&tp->tx_lock);
6613         if (epause->autoneg)
6614                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6615         else
6616                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6617         if (epause->rx_pause)
6618                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6619         else
6620                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6621         if (epause->tx_pause)
6622                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6623         else
6624                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6625         tg3_halt(tp);
6626         tg3_init_hw(tp);
6627         tg3_netif_start(tp);
6628         spin_unlock(&tp->tx_lock);
6629         spin_unlock_irq(&tp->lock);
6630   
6631         return 0;
6632 }
6633   
6634 static u32 tg3_get_rx_csum(struct net_device *dev)
6635 {
6636         struct tg3 *tp = netdev_priv(dev);
6637         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6638 }
6639   
6640 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6641 {
6642         struct tg3 *tp = netdev_priv(dev);
6643   
6644         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6645                 if (data != 0)
6646                         return -EINVAL;
6647                 return 0;
6648         }
6649   
6650         spin_lock_irq(&tp->lock);
6651         if (data)
6652                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6653         else
6654                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6655         spin_unlock_irq(&tp->lock);
6656   
6657         return 0;
6658 }
6659   
6660 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6661 {
6662         struct tg3 *tp = netdev_priv(dev);
6663   
6664         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6665                 if (data != 0)
6666                         return -EINVAL;
6667                 return 0;
6668         }
6669   
6670         if (data)
6671                 dev->features |= NETIF_F_IP_CSUM;
6672         else
6673                 dev->features &= ~NETIF_F_IP_CSUM;
6674
6675         return 0;
6676 }
6677
6678 static int tg3_get_stats_count (struct net_device *dev)
6679 {
6680         return TG3_NUM_STATS;
6681 }
6682
6683 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6684 {
6685         switch (stringset) {
6686         case ETH_SS_STATS:
6687                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6688                 break;
6689         default:
6690                 WARN_ON(1);     /* we need a WARN() */
6691                 break;
6692         }
6693 }
6694
6695 static void tg3_get_ethtool_stats (struct net_device *dev,
6696                                    struct ethtool_stats *estats, u64 *tmp_stats)
6697 {
6698         struct tg3 *tp = netdev_priv(dev);
6699         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6700 }
6701
6702 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6703 {
6704         struct mii_ioctl_data *data = if_mii(ifr);
6705         struct tg3 *tp = netdev_priv(dev);
6706         int err;
6707
6708         switch(cmd) {
6709         case SIOCGMIIPHY:
6710                 data->phy_id = PHY_ADDR;
6711
6712                 /* fallthru */
6713         case SIOCGMIIREG: {
6714                 u32 mii_regval;
6715
6716                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6717                         break;                  /* We have no PHY */
6718
6719                 spin_lock_irq(&tp->lock);
6720                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6721                 spin_unlock_irq(&tp->lock);
6722
6723                 data->val_out = mii_regval;
6724
6725                 return err;
6726         }
6727
6728         case SIOCSMIIREG:
6729                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6730                         break;                  /* We have no PHY */
6731
6732                 if (!capable(CAP_NET_ADMIN))
6733                         return -EPERM;
6734
6735                 spin_lock_irq(&tp->lock);
6736                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6737                 spin_unlock_irq(&tp->lock);
6738
6739                 return err;
6740
6741         default:
6742                 /* do nothing */
6743                 break;
6744         }
6745         return -EOPNOTSUPP;
6746 }
6747
6748 #if TG3_VLAN_TAG_USED
6749 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6750 {
6751         struct tg3 *tp = netdev_priv(dev);
6752
6753         spin_lock_irq(&tp->lock);
6754         spin_lock(&tp->tx_lock);
6755
6756         tp->vlgrp = grp;
6757
6758         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6759         __tg3_set_rx_mode(dev);
6760
6761         spin_unlock(&tp->tx_lock);
6762         spin_unlock_irq(&tp->lock);
6763 }
6764
6765 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6766 {
6767         struct tg3 *tp = netdev_priv(dev);
6768
6769         spin_lock_irq(&tp->lock);
6770         spin_lock(&tp->tx_lock);
6771         if (tp->vlgrp)
6772                 tp->vlgrp->vlan_devices[vid] = NULL;
6773         spin_unlock(&tp->tx_lock);
6774         spin_unlock_irq(&tp->lock);
6775 }
6776 #endif
6777
6778 static struct ethtool_ops tg3_ethtool_ops = {
6779         .get_settings           = tg3_get_settings,
6780         .set_settings           = tg3_set_settings,
6781         .get_drvinfo            = tg3_get_drvinfo,
6782         .get_regs_len           = tg3_get_regs_len,
6783         .get_regs               = tg3_get_regs,
6784         .get_wol                = tg3_get_wol,
6785         .set_wol                = tg3_set_wol,
6786         .get_msglevel           = tg3_get_msglevel,
6787         .set_msglevel           = tg3_set_msglevel,
6788         .nway_reset             = tg3_nway_reset,
6789         .get_link               = ethtool_op_get_link,
6790         .get_eeprom_len         = tg3_get_eeprom_len,
6791         .get_eeprom             = tg3_get_eeprom,
6792         .get_ringparam          = tg3_get_ringparam,
6793         .set_ringparam          = tg3_set_ringparam,
6794         .get_pauseparam         = tg3_get_pauseparam,
6795         .set_pauseparam         = tg3_set_pauseparam,
6796         .get_rx_csum            = tg3_get_rx_csum,
6797         .set_rx_csum            = tg3_set_rx_csum,
6798         .get_tx_csum            = ethtool_op_get_tx_csum,
6799         .set_tx_csum            = tg3_set_tx_csum,
6800         .get_sg                 = ethtool_op_get_sg,
6801         .set_sg                 = ethtool_op_set_sg,
6802 #if TG3_TSO_SUPPORT != 0
6803         .get_tso                = ethtool_op_get_tso,
6804         .set_tso                = tg3_set_tso,
6805 #endif
6806         .get_strings            = tg3_get_strings,
6807         .get_stats_count        = tg3_get_stats_count,
6808         .get_ethtool_stats      = tg3_get_ethtool_stats,
6809 };
6810
6811 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6812 static void __devinit tg3_nvram_init(struct tg3 *tp)
6813 {
6814         int j;
6815
6816         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
6817                 return;
6818
6819         tw32_f(GRC_EEPROM_ADDR,
6820              (EEPROM_ADDR_FSM_RESET |
6821               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6822                EEPROM_ADDR_CLKPERD_SHIFT)));
6823
6824         /* XXX schedule_timeout() ... */
6825         for (j = 0; j < 100; j++)
6826                 udelay(10);
6827
6828         /* Enable seeprom accesses. */
6829         tw32_f(GRC_LOCAL_CTRL,
6830              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6831         udelay(100);
6832
6833         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6834             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6835                 u32 nvcfg1;
6836
6837                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6838                         u32 nvaccess = tr32(NVRAM_ACCESS);
6839
6840                         tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6841                 }
6842
6843                 nvcfg1 = tr32(NVRAM_CFG1);
6844
6845                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6846                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6847                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6848                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6849                 } else {
6850                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6851                         tw32(NVRAM_CFG1, nvcfg1);
6852                 }
6853
6854                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6855                         u32 nvaccess = tr32(NVRAM_ACCESS);
6856
6857                         tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6858                 }
6859         } else {
6860                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6861         }
6862 }
6863
6864 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6865                                                  u32 offset, u32 *val)
6866 {
6867         u32 tmp;
6868         int i;
6869
6870         if (offset > EEPROM_ADDR_ADDR_MASK ||
6871             (offset % 4) != 0)
6872                 return -EINVAL;
6873
6874         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6875                                         EEPROM_ADDR_DEVID_MASK |
6876                                         EEPROM_ADDR_READ);
6877         tw32(GRC_EEPROM_ADDR,
6878              tmp |
6879              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6880              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6881               EEPROM_ADDR_ADDR_MASK) |
6882              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6883
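        /* Poll for completion: 10000 iterations at 100us apiece gives the
         * serial EEPROM roughly one second to signal EEPROM_ADDR_COMPLETE
         * before we give up and return -EBUSY below.
         */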
6884         for (i = 0; i < 10000; i++) {
6885                 tmp = tr32(GRC_EEPROM_ADDR);
6886
6887                 if (tmp & EEPROM_ADDR_COMPLETE)
6888                         break;
6889                 udelay(100);
6890         }
6891         if (!(tmp & EEPROM_ADDR_COMPLETE))
6892                 return -EBUSY;
6893
6894         *val = tr32(GRC_EEPROM_DATA);
6895         return 0;
6896 }
6897
6898 static int __devinit tg3_nvram_read(struct tg3 *tp,
6899                                     u32 offset, u32 *val)
6900 {
6901         int i;
6902
6903         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
6904                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
6905                 return -EINVAL;
6906         }
6907
6908         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6909                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6910
6911         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6912                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6913                           NVRAM_BUFFERED_PAGE_POS) +
6914                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
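        /* Buffered (page-oriented) flash is addressed as page:offset rather
         * than linearly.  As an editor's illustration, assuming the usual
         * 264-byte NVRAM_BUFFERED_PAGE_SIZE and a 9-bit page position,
         * linear offset 600 becomes ((600 / 264) << 9) + (600 % 264),
         * i.e. (2 << 9) + 72 = 1096.
         */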
6915
6916         if (offset > NVRAM_ADDR_MSK)
6917                 return -EINVAL;
6918
6919         tg3_nvram_lock(tp);
6920
6921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6922                 u32 nvaccess = tr32(NVRAM_ACCESS);
6923
6924                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6925         }
6926
6927         tw32(NVRAM_ADDR, offset);
6928         tw32(NVRAM_CMD,
6929              NVRAM_CMD_RD | NVRAM_CMD_GO |
6930              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6931
6932         /* Wait for done bit to clear. */
6933         for (i = 0; i < 1000; i++) {
6934                 udelay(10);
6935                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6936                         udelay(10);
6937                         *val = swab32(tr32(NVRAM_RDDATA));
6938                         break;
6939                 }
6940         }
6941
6942         tg3_nvram_unlock(tp);
6943
6944         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6945                 u32 nvaccess = tr32(NVRAM_ACCESS);
6946
6947                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6948         }
6949
6950         if (i >= 1000)
6951                 return -EBUSY;
6952
6953         return 0;
6954 }
6955
6956 struct subsys_tbl_ent {
6957         u16 subsys_vendor, subsys_devid;
6958         u32 phy_id;
6959 };
6960
6961 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6962         /* Broadcom boards. */
6963         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6964         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6965         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6966         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
6967         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6968         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6969         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
6970         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6971         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6972         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6973         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6974
6975         /* 3com boards. */
6976         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6977         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6978         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
6979         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6980         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6981
6982         /* DELL boards. */
6983         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6984         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6985         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6986         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6987
6988         /* Compaq boards. */
6989         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6990         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6991         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
6992         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6993         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6994
6995         /* IBM boards. */
6996         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
6997 };
6998
6999 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7000 {
7001         int i;
7002
7003         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7004                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7005                      tp->pdev->subsystem_vendor) &&
7006                     (subsys_id_to_phy_id[i].subsys_devid ==
7007                      tp->pdev->subsystem_device))
7008                         return &subsys_id_to_phy_id[i];
7009         }
7010         return NULL;
7011 }
7012
7013 static int __devinit tg3_phy_probe(struct tg3 *tp)
7014 {
7015         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7016         u32 hw_phy_id, hw_phy_id_masked;
7017         u32 val;
7018         int eeprom_signature_found, eeprom_phy_serdes, err;
7019
7020         tp->phy_id = PHY_ID_INVALID;
7021         eeprom_phy_id = PHY_ID_INVALID;
7022         eeprom_phy_serdes = 0;
7023         eeprom_signature_found = 0;
7024         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7025         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7026                 u32 nic_cfg, led_cfg;
7027                 u32 nic_phy_id, cfg2;
7028
7029                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7030                 tp->nic_sram_data_cfg = nic_cfg;
7031
7032                 eeprom_signature_found = 1;
7033
7034                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7035                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7036                         eeprom_phy_serdes = 1;
7037
7038                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7039                 if (nic_phy_id != 0) {
7040                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7041                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7042
7043                         eeprom_phy_id  = (id1 >> 16) << 10;
7044                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7045                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7046                 } else
7047                         eeprom_phy_id = 0;
7048
7049                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7050                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
7051                         led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7052                                     SHASTA_EXT_LED_MODE_MASK);
7053                 } else
7054                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7055
7056                 switch (led_cfg) {
7057                 default:
7058                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7059                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7060                         break;
7061
7062                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7063                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7064                         break;
7065
7066                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7067                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7068                         break;
7069
7070                 case SHASTA_EXT_LED_SHARED:
7071                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7072                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7073                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7074                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7075                                                  LED_CTRL_MODE_PHY_2);
7076                         break;
7077
7078                 case SHASTA_EXT_LED_MAC:
7079                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7080                         break;
7081
7082                 case SHASTA_EXT_LED_COMBO:
7083                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7084                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7085                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7086                                                  LED_CTRL_MODE_PHY_2);
7087                         break;
7088
7089                 }
7090
7091                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7092                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7093                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7094                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7095
7096                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7097                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7098                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7099                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7100                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7101
7102                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7103                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7104                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7105                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7106                 }
7107                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7108                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7109
7110                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &cfg2);
7111                 if (cfg2 & (1 << 17))
7112                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7113         }
7114
7115         /* Reading the PHY ID register can conflict with ASF
7116          * firmware access to the PHY hardware.
7117          */
7118         err = 0;
7119         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7120                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7121         } else {
7122                 /* Now read the physical PHY_ID from the chip and verify
7123                  * that it is sane.  If it doesn't look good, we fall back
7124                  * to the PHY_ID found in the eeprom area, and failing that
7125                  * to the hard-coded subsystem-ID table.
7126                  */
7127                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7128                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7129
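                /* The two MII ID words are folded into the driver's single
                 * 32-bit PHY id format below; PHY_ID_MASK later strips the
                 * revision bits so the result can be compared against the
                 * KNOWN_PHY_ID()/PHY_ID_* constants.
                 */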
7130                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7131                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7132                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7133
7134                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7135         }
7136
7137         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7138                 tp->phy_id = hw_phy_id;
7139                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7140                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7141         } else {
7142                 if (eeprom_signature_found) {
7143                         tp->phy_id = eeprom_phy_id;
7144                         if (eeprom_phy_serdes)
7145                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7146                 } else {
7147                         struct subsys_tbl_ent *p;
7148
7149                         /* No eeprom signature?  Try the hardcoded
7150                          * subsys device table.
7151                          */
7152                         p = lookup_by_subsys(tp);
7153                         if (!p)
7154                                 return -ENODEV;
7155
7156                         tp->phy_id = p->phy_id;
7157                         if (!tp->phy_id ||
7158                             tp->phy_id == PHY_ID_BCM8002)
7159                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7160                 }
7161         }
7162
7163         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7164             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7165                 u32 bmsr, adv_reg, tg3_ctrl;
7166
7167                 tg3_readphy(tp, MII_BMSR, &bmsr);
7168                 tg3_readphy(tp, MII_BMSR, &bmsr);
7169
7170                 if (bmsr & BMSR_LSTATUS)
7171                         goto skip_phy_reset;
7172                     
7173                 err = tg3_phy_reset(tp);
7174                 if (err)
7175                         return err;
7176
7177                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7178                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7179                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7180                 tg3_ctrl = 0;
7181                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7182                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7183                                     MII_TG3_CTRL_ADV_1000_FULL);
7184                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7185                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7186                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7187                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7188                 }
7189
7190                 if (!tg3_copper_is_advertising_all(tp)) {
7191                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7192
7193                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7194                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7195
7196                         tg3_writephy(tp, MII_BMCR,
7197                                      BMCR_ANENABLE | BMCR_ANRESTART);
7198                 }
7199                 tg3_phy_set_wirespeed(tp);
7200
7201                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7202                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7203                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7204         }
7205
7206 skip_phy_reset:
7207         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7208                 err = tg3_init_5401phy_dsp(tp);
7209                 if (err)
7210                         return err;
7211         }
7212
7213         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7214                 err = tg3_init_5401phy_dsp(tp);
7215         }
7216
7217         if (!eeprom_signature_found)
7218                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7219
7220         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7221                 tp->link_config.advertising =
7222                         (ADVERTISED_1000baseT_Half |
7223                          ADVERTISED_1000baseT_Full |
7224                          ADVERTISED_Autoneg |
7225                          ADVERTISED_FIBRE);
7226         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7227                 tp->link_config.advertising &=
7228                         ~(ADVERTISED_1000baseT_Half |
7229                           ADVERTISED_1000baseT_Full);
7230
7231         return err;
7232 }
7233
7234 static void __devinit tg3_read_partno(struct tg3 *tp)
7235 {
7236         unsigned char vpd_data[256];
7237         int i;
7238
7239         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7240                 /* Sun decided not to put the necessary bits in the
7241                  * NVRAM of their onboard tg3 parts :(
7242                  */
7243                 strcpy(tp->board_part_number, "Sun 570X");
7244                 return;
7245         }
7246
7247         for (i = 0; i < 256; i += 4) {
7248                 u32 tmp;
7249
7250                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7251                         goto out_not_found;
7252
7253                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7254                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7255                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7256                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7257         }
7258
7259         /* Now parse and find the part number. */
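        /* The buffer follows the standard PCI VPD layout: large-resource tag
         * 0x82 introduces the identifier string, 0x90 the read-only (VPD-R)
         * section whose 'PN' keyword carries the board part number, and 0x91
         * the read/write (VPD-W) section.
         */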
7260         for (i = 0; i < 256; ) {
7261                 unsigned char val = vpd_data[i];
7262                 int block_end;
7263
7264                 if (val == 0x82 || val == 0x91) {
7265                         i = (i + 3 +
7266                              (vpd_data[i + 1] +
7267                               (vpd_data[i + 2] << 8)));
7268                         continue;
7269                 }
7270
7271                 if (val != 0x90)
7272                         goto out_not_found;
7273
7274                 block_end = (i + 3 +
7275                              (vpd_data[i + 1] +
7276                               (vpd_data[i + 2] << 8)));
7277                 i += 3;
7278                 while (i < block_end) {
7279                         if (vpd_data[i + 0] == 'P' &&
7280                             vpd_data[i + 1] == 'N') {
7281                                 int partno_len = vpd_data[i + 2];
7282
7283                                 if (partno_len > 24)
7284                                         goto out_not_found;
7285
7286                                 memcpy(tp->board_part_number,
7287                                        &vpd_data[i + 3],
7288                                        partno_len);
7289
7290                                 /* Success. */
7291                                 return;
7292                         }
                        /* Advance to the next keyword: 2-byte name, 1-byte
                         * length, then the data.  Without this the loop never
                         * terminates when 'PN' is not the first keyword.
                         */
                        i += 3 + vpd_data[i + 2];
7293                 }
7294
7295                 /* Part number not found. */
7296                 goto out_not_found;
7297         }
7298
7299 out_not_found:
7300         strcpy(tp->board_part_number, "none");
7301 }
7302
7303 #ifdef CONFIG_SPARC64
7304 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7305 {
7306         struct pci_dev *pdev = tp->pdev;
7307         struct pcidev_cookie *pcp = pdev->sysdata;
7308
7309         if (pcp != NULL) {
7310                 int node = pcp->prom_node;
7311                 u32 venid;
7312                 int err;
7313
7314                 err = prom_getproperty(node, "subsystem-vendor-id",
7315                                        (char *) &venid, sizeof(venid));
7316                 if (err == 0 || err == -1)
7317                         return 0;
7318                 if (venid == PCI_VENDOR_ID_SUN)
7319                         return 1;
7320         }
7321         return 0;
7322 }
7323 #endif
7324
7325 static int __devinit tg3_get_invariants(struct tg3 *tp)
7326 {
7327         u32 misc_ctrl_reg;
7328         u32 cacheline_sz_reg;
7329         u32 pci_state_reg, grc_misc_cfg;
7330         u32 val;
7331         u16 pci_cmd;
7332         int err;
7333
7334 #ifdef CONFIG_SPARC64
7335         if (tg3_is_sun_570X(tp))
7336                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7337 #endif
7338
7339         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7340          * reordering to the mailbox registers done by the host
7341          * controller can cause major troubles.  We read back from
7342          * every mailbox register write to force the writes to be
7343          * posted to the chip in order.
7344          */
7345         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7346                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7347             pci_find_device(PCI_VENDOR_ID_INTEL,
7348                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7349             pci_find_device(PCI_VENDOR_ID_INTEL,
7350                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7351             pci_find_device(PCI_VENDOR_ID_INTEL,
7352                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7353             pci_find_device(PCI_VENDOR_ID_AMD,
7354                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7355                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7356
7357         /* Force memory write invalidate off.  If we leave it on,
7358          * then on 5700_BX chips we have to enable a workaround.
7359          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7360          * to match the cacheline size.  The Broadcom driver has this
7361          * workaround but turns MWI off all the time and so never uses
7362          * it, which suggests that the workaround is insufficient.
7363          */
7364         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7365         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7366         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7367
7368         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7369          * has the register indirect write enable bit set before
7370          * we try to access any of the MMIO registers.  It is also
7371          * critical that the PCI-X hw workaround situation is decided
7372          * before that as well.
7373          */
7374         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7375                               &misc_ctrl_reg);
7376
7377         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7378                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7379
7380         /* Initialize misc host control in PCI block. */
7381         tp->misc_host_ctrl |= (misc_ctrl_reg &
7382                                MISC_HOST_CTRL_CHIPREV);
7383         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7384                                tp->misc_host_ctrl);
7385
7386         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7387                               &cacheline_sz_reg);
7388
7389         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7390         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7391         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7392         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7393
7394         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7395                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7396
7397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7398             tp->pci_lat_timer < 64) {
7399                 tp->pci_lat_timer = 64;
7400
7401                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7402                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7403                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7404                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7405
7406                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7407                                        cacheline_sz_reg);
7408         }
7409
7410         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7411                               &pci_state_reg);
7412
7413         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7414                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7415
7416                 /* If this is a 5700 BX chipset, and we are in PCI-X
7417                  * mode, enable register write workaround.
7418                  *
7419                  * The workaround is to use indirect register accesses
7420                  * for all chip writes not to mailbox registers.
7421                  */
7422                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7423                         u32 pm_reg;
7424                         u16 pci_cmd;
7425
7426                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7427
7428                         /* The chip can have its power management PCI config
7429                          * space registers clobbered due to this bug.
7430                          * So explicitly force the chip into D0 here.
7431                          */
7432                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7433                                               &pm_reg);
7434                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7435                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7436                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7437                                                pm_reg);
7438
7439                         /* Also, force SERR#/PERR# in PCI command. */
7440                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7441                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7442                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7443                 }
7444         }
7445
7446         /* Back-to-back register writes can cause problems on this chip;
7447          * the workaround is to read back all reg writes except those to
7448          * mailbox regs.  See tg3_write_indirect_reg32().
7449          *
7450          * PCI Express 5750_A0 rev chips need this workaround too.
7451          */
7452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7453             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7454              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7455                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7456
7457         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7458                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7459         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7460                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7461
7462         /* Chip-specific fixup from Broadcom driver */
7463         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7464             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7465                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7466                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7467         }
7468
7469         /* Force the chip into D0. */
7470         err = tg3_set_power_state(tp, 0);
7471         if (err) {
7472                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7473                        pci_name(tp->pdev));
7474                 return err;
7475         }
7476
7477         /* 5700 B0 chips do not support checksumming correctly due
7478          * to hardware bugs.
7479          */
7480         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7481                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7482
7483         /* Pseudo-header checksum is done by hardware logic and not
7484          * the offload processors, so make the chip do the pseudo-
7485          * header checksums on receive.  For transmit it is more
7486          * convenient to do the pseudo-header checksum in software
7487          * as Linux does that on transmit for us in all cases.
7488          */
7489         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7490         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7491
7492         /* Derive initial jumbo mode from MTU assigned in
7493          * ether_setup() via the alloc_etherdev() call
7494          */
7495         if (tp->dev->mtu > ETH_DATA_LEN)
7496                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7497
7498         /* Determine WakeOnLan speed to use. */
7499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7500             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7501             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7502             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7503                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7504         } else {
7505                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7506         }
7507
7508         /* A few boards don't want Ethernet@WireSpeed phy feature */
7509         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7510             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7511              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7512              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7513                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7514
7515         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7516             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7517                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7518         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7519                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7520
7521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7523                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7524
7525         /* Only 5701 and later support tagged irq status mode.
7526          * Also, 5788 chips cannot use tagged irq status.
7527          *
7528          * However, since we are using NAPI, avoid tagged irq status
7529          * because the interrupt condition is more difficult to
7530          * fully clear in that mode.
7531          */
7532         tp->coalesce_mode = 0;
7533
7534         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7535             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7536                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7537
7538         /* Initialize MAC MI mode, polling disabled. */
7539         tw32_f(MAC_MI_MODE, tp->mi_mode);
7540         udelay(80);
7541
7542         /* Initialize data/descriptor byte/word swapping. */
7543         val = tr32(GRC_MODE);
7544         val &= GRC_MODE_HOST_STACKUP;
7545         tw32(GRC_MODE, val | tp->grc_mode);
7546
7547         tg3_switch_clocks(tp);
7548
7549         /* Clear this out for sanity. */
7550         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7551
7552         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7553                               &pci_state_reg);
7554         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7555             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7556                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7557
7558                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7559                     chiprevid == CHIPREV_ID_5701_B0 ||
7560                     chiprevid == CHIPREV_ID_5701_B2 ||
7561                     chiprevid == CHIPREV_ID_5701_B5) {
7562                         void __iomem *sram_base;
7563
7564                         /* Write some dummy words into the SRAM status block
7565                          * area, see if it reads back correctly.  If the return
7566                          * value is bad, force enable the PCIX workaround.
7567                          */
7568                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7569
7570                         writel(0x00000000, sram_base);
7571                         writel(0x00000000, sram_base + 4);
7572                         writel(0xffffffff, sram_base + 4);
7573                         if (readl(sram_base) != 0x00000000)
7574                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7575                 }
7576         }
7577
7578         udelay(50);
7579         tg3_nvram_init(tp);
7580
7581         grc_misc_cfg = tr32(GRC_MISC_CFG);
7582         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7583
7584         /* Broadcom's driver says that CIOBE multisplit has a bug */
7585 #if 0
7586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7587             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7588                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7589                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7590         }
7591 #endif
7592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7593             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7594              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7595                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7596
7597         /* these are limited to 10/100 only */
7598         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7599              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7600             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7601              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7602              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7603               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7604               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7605             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7606              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
7607                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7608
7609         err = tg3_phy_probe(tp);
7610         if (err) {
7611                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7612                        pci_name(tp->pdev), err);
7613                 /* ... but do not return immediately ... */
7614         }
7615
7616         tg3_read_partno(tp);
7617
7618         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7619                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7620         } else {
7621                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7622                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7623                 else
7624                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7625         }
7626
7627         /* 5700 {AX,BX} chips have a broken status block link
7628          * change bit implementation, so we must use the
7629          * status register in those cases.
7630          */
7631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7632                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7633         else
7634                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7635
7636         /* The led_ctrl is set during tg3_phy_probe; here we might
7637          * have to force the link status polling mechanism based
7638          * upon subsystem IDs.
7639          */
7640         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7641             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7642                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7643                                   TG3_FLAG_USE_LINKCHG_REG);
7644         }
7645
7646         /* For all SERDES we poll the MAC status register. */
7647         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7648                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7649         else
7650                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7651
7652         /* 5700 BX chips need to have their TX producer index mailboxes
7653          * written twice to workaround a bug.
7654          */
7655         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7656                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7657         else
7658                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7659
7660         /* It seems all chips can get confused if TX buffers
7661          * straddle the 4GB address boundary in some cases.
7662          */
7663         tp->dev->hard_start_xmit = tg3_start_xmit;
7664
7665         tp->rx_offset = 2;
7666         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7667             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7668                 tp->rx_offset = 0;
7669
7670         /* By default, disable wake-on-lan.  User can change this
7671          * using ETHTOOL_SWOL.
7672          */
7673         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7674
7675         return err;
7676 }
7677
7678 #ifdef CONFIG_SPARC64
7679 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7680 {
7681         struct net_device *dev = tp->dev;
7682         struct pci_dev *pdev = tp->pdev;
7683         struct pcidev_cookie *pcp = pdev->sysdata;
7684
7685         if (pcp != NULL) {
7686                 int node = pcp->prom_node;
7687
7688                 if (prom_getproplen(node, "local-mac-address") == 6) {
7689                         prom_getproperty(node, "local-mac-address",
7690                                          dev->dev_addr, 6);
7691                         return 0;
7692                 }
7693         }
7694         return -ENODEV;
7695 }
7696
7697 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7698 {
7699         struct net_device *dev = tp->dev;
7700
7701         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7702         return 0;
7703 }
7704 #endif
7705
7706 static int __devinit tg3_get_device_address(struct tg3 *tp)
7707 {
7708         struct net_device *dev = tp->dev;
7709         u32 hi, lo, mac_offset;
7710
7711 #ifdef CONFIG_SPARC64
7712         if (!tg3_get_macaddr_sparc(tp))
7713                 return 0;
7714 #endif
7715
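        /* The MAC address is read from NVRAM offset 0x7c; the second
         * port of a 5704 stores it at offset 0xcc instead.
         */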
7716         mac_offset = 0x7c;
7717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7718             !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
7719                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7720                         mac_offset = 0xcc;
7721                 if (tg3_nvram_lock(tp))
7722                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7723                 else
7724                         tg3_nvram_unlock(tp);
7725         }
7726
7727         /* First try to get it from MAC address mailbox. */
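        /* A high word of 0x484b (ASCII "HK") is the signature indicating
         * that the mailbox holds a valid address.
         */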
7728         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
7729         if ((hi >> 16) == 0x484b) {
7730                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7731                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7732
7733                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7734                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7735                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7736                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7737                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7738         }
7739         /* Next, try NVRAM. */
7740         else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
7741                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7742                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7743                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7744                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7745                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7746                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7747                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7748                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7749         }
7750         /* Finally just fetch it out of the MAC control regs. */
7751         else {
7752                 hi = tr32(MAC_ADDR_0_HIGH);
7753                 lo = tr32(MAC_ADDR_0_LOW);
7754
7755                 dev->dev_addr[5] = lo & 0xff;
7756                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7757                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7758                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7759                 dev->dev_addr[1] = hi & 0xff;
7760                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7761         }
7762
7763         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7764 #ifdef CONFIG_SPARC64
7765                 if (!tg3_get_default_macaddr_sparc(tp))
7766                         return 0;
7767 #endif
7768                 return -EINVAL;
7769         }
7770         return 0;
7771 }
7772
7773 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7774 {
7775         struct tg3_internal_buffer_desc test_desc;
7776         u32 sram_dma_descs;
7777         int i, ret;
7778
7779         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7780
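        /* Clear the DMA completion FIFOs and the read/write DMA status
         * registers before running the test.
         */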
7781         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7782         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7783         tw32(RDMAC_STATUS, 0);
7784         tw32(WDMAC_STATUS, 0);
7785
7786         tw32(BUFMGR_MODE, 0);
7787         tw32(FTQ_RESET, 0);
7788
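        /* Build a single internal buffer descriptor pointing at the host
         * test buffer; 0x2100 is the NIC SRAM offset used as the on-chip
         * end of the transfer.
         */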
7789         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7790         test_desc.addr_lo = buf_dma & 0xffffffff;
7791         test_desc.nic_mbuf = 0x00002100;
7792         test_desc.len = size;
7793
7794         /*
7795          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7796          * the *second* time the tg3 driver was getting loaded after an
7797          * initial scan.
7798          *
7799          * Broadcom tells me:
7800          *   ...the DMA engine is connected to the GRC block and a DMA
7801          *   reset may affect the GRC block in some unpredictable way...
7802          *   The behavior of resets to individual blocks has not been tested.
7803          *
7804          * Broadcom noted the GRC reset will also reset all sub-components.
7805          */
7806         if (to_device) {
7807                 test_desc.cqid_sqid = (13 << 8) | 2;
7808
7809                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7810                 udelay(40);
7811         } else {
7812                 test_desc.cqid_sqid = (16 << 8) | 7;
7813
7814                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7815                 udelay(40);
7816         }
7817         test_desc.flags = 0x00000005;
7818
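        /* Copy the descriptor into NIC SRAM one word at a time through the
         * PCI memory window registers.
         */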
7819         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7820                 u32 val;
7821
7822                 val = *(((u32 *)&test_desc) + i);
7823                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7824                                        sram_dma_descs + (i * sizeof(u32)));
7825                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7826         }
7827         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7828
7829         if (to_device) {
7830                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7831         } else {
7832                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7833         }
7834
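        /* Poll the completion FIFO for our descriptor; 40 iterations of
         * udelay(100) gives the DMA engine roughly 4ms to finish.
         */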
7835         ret = -ENODEV;
7836         for (i = 0; i < 40; i++) {
7837                 u32 val;
7838
7839                 if (to_device)
7840                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7841                 else
7842                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7843                 if ((val & 0xffff) == sram_dma_descs) {
7844                         ret = 0;
7845                         break;
7846                 }
7847
7848                 udelay(100);
7849         }
7850
7851         return ret;
7852 }
7853
7854 #define TEST_BUFFER_SIZE        0x400
7855
7856 static int __devinit tg3_test_dma(struct tg3 *tp)
7857 {
7858         dma_addr_t buf_dma;
7859         u32 *buf;
7860         int ret;
7861
7862         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7863         if (!buf) {
7864                 ret = -ENOMEM;
7865                 goto out_nofree;
7866         }
7867
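        /* Start from the default DMA read/write command settings; the
         * remaining dma_rwctrl bits are tuned below based on bus type
         * (PCI, PCI-X, PCI Express) and the host cache line size.
         */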
7868         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7869                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7870
7871 #ifndef CONFIG_X86
7872         {
7873                 u8 byte;
7874                 int cacheline_size;
7875                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7876
7877                 if (byte == 0)
7878                         cacheline_size = 1024;
7879                 else
7880                         cacheline_size = (int) byte * 4;
7881
7882                 switch (cacheline_size) {
7883                 case 16:
7884                 case 32:
7885                 case 64:
7886                 case 128:
7887                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7888                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7889                                 tp->dma_rwctrl |=
7890                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7891                                 break;
7892                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7893                                 tp->dma_rwctrl &=
7894                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
7895                                 tp->dma_rwctrl |=
7896                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7897                                 break;
7898                         }
7899                         /* fallthrough */
7900                 case 256:
7901                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7902                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7903                                 tp->dma_rwctrl |=
7904                                         DMA_RWCTRL_WRITE_BNDRY_256;
7905                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7906                                 tp->dma_rwctrl |=
7907                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7908                 }
7909         }
7910 #endif
7911
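        /* Bus-specific tuning: PCI Express, plain PCI, and PCI-X each need
         * different dma_rwctrl values.
         */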
7912         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7913                 tp->dma_rwctrl |= 0x001f0000;
7914         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7915                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7916                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7917                         tp->dma_rwctrl |= 0x003f0000;
7918                 else
7919                         tp->dma_rwctrl |= 0x003f000f;
7920         } else {
7921                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7922                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7923                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7924
7925                         if (ccval == 0x6 || ccval == 0x7)
7926                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7927
7928                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
7929                         tp->dma_rwctrl |= 0x009f0000;
7930                 } else {
7931                         tp->dma_rwctrl |= 0x001b000f;
7932                 }
7933         }
7934
7935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7937                 tp->dma_rwctrl &= 0xfffffff0;
7938
7939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7941                 /* Remove this if it causes problems for some boards. */
7942                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7943
7944                 /* On 5700/5701 chips, we need to set this bit.
7945                  * Otherwise the chip will issue cacheline transactions
7946                  * to streamable DMA memory without all of the byte
7947                  * enables turned on.  This is an error on several
7948                  * RISC PCI controllers, in particular sparc64.
7949                  *
7950                  * On 5703/5704 chips, this bit has been reassigned
7951                  * a different meaning.  In particular, it is used
7952                  * on those chips to enable a PCI-X workaround.
7953                  */
7954                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7955         }
7956
7957         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7958
7959 #if 0
7960         /* Unneeded, already done by tg3_get_invariants.  */
7961         tg3_switch_clocks(tp);
7962 #endif
7963
7964         ret = 0;
7965         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7966             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7967                 goto out;
7968
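        /* On 5700/5701, DMA a test pattern to NIC SRAM and back, and fall
         * back to a 16-byte write boundary if the data read back is
         * corrupted.
         */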
7969         while (1) {
7970                 u32 *p = buf, i;
7971
7972                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7973                         p[i] = i;
7974
7975                 /* Send the buffer to the chip. */
7976                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7977                 if (ret) {
7978                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7979                         break;
7980                 }
7981
7982 #if 0
7983                 /* validate data reached card RAM correctly. */
7984                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7985                         u32 val;
7986                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7987                         if (le32_to_cpu(val) != p[i]) {
7988                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
7989                                 /* ret = -ENODEV here? */
7990                         }
7991                         p[i] = 0;
7992                 }
7993 #endif
7994                 /* Now read it back. */
7995                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7996                 if (ret) {
7997                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
7998
7999                         break;
8000                 }
8001
8002                 /* Verify it. */
8003                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8004                         if (p[i] == i)
8005                                 continue;
8006
8007                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8008                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8009                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8010                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8011                                 break;
8012                         } else {
8013                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8014                                 ret = -ENODEV;
8015                                 goto out;
8016                         }
8017                 }
8018
8019                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8020                         /* Success. */
8021                         ret = 0;
8022                         break;
8023                 }
8024         }
8025
8026 out:
8027         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8028 out_nofree:
8029         return ret;
8030 }
8031
8032 static void __devinit tg3_init_link_config(struct tg3 *tp)
8033 {
8034         tp->link_config.advertising =
8035                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8036                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8037                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8038                  ADVERTISED_Autoneg | ADVERTISED_MII);
8039         tp->link_config.speed = SPEED_INVALID;
8040         tp->link_config.duplex = DUPLEX_INVALID;
8041         tp->link_config.autoneg = AUTONEG_ENABLE;
8042         netif_carrier_off(tp->dev);
8043         tp->link_config.active_speed = SPEED_INVALID;
8044         tp->link_config.active_duplex = DUPLEX_INVALID;
8045         tp->link_config.phy_is_low_power = 0;
8046         tp->link_config.orig_speed = SPEED_INVALID;
8047         tp->link_config.orig_duplex = DUPLEX_INVALID;
8048         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8049 }
8050
8051 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8052 {
8053         tp->bufmgr_config.mbuf_read_dma_low_water =
8054                 DEFAULT_MB_RDMA_LOW_WATER;
8055         tp->bufmgr_config.mbuf_mac_rx_low_water =
8056                 DEFAULT_MB_MACRX_LOW_WATER;
8057         tp->bufmgr_config.mbuf_high_water =
8058                 DEFAULT_MB_HIGH_WATER;
8059
8060         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8061                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8062         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8063                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8064         tp->bufmgr_config.mbuf_high_water_jumbo =
8065                 DEFAULT_MB_HIGH_WATER_JUMBO;
8066
8067         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8068         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8069 }
8070
8071 static char * __devinit tg3_phy_string(struct tg3 *tp)
8072 {
8073         switch (tp->phy_id & PHY_ID_MASK) {
8074         case PHY_ID_BCM5400:    return "5400";
8075         case PHY_ID_BCM5401:    return "5401";
8076         case PHY_ID_BCM5411:    return "5411";
8077         case PHY_ID_BCM5701:    return "5701";
8078         case PHY_ID_BCM5703:    return "5703";
8079         case PHY_ID_BCM5704:    return "5704";
8080         case PHY_ID_BCM5705:    return "5705";
8081         case PHY_ID_BCM5750:    return "5750";
8082         case PHY_ID_BCM8002:    return "8002/serdes";
8083         case 0:                 return "serdes";
8084         default:                return "unknown";
8085         }
8086 }
8087
8088 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8089 {
8090         struct pci_dev *peer;
8091         unsigned int func, devnr = tp->pdev->devfn & ~7;
8092
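        /* The 5704 is a dual-MAC device; both functions sit in the same
         * PCI slot, so scan the other functions at our devfn for the peer.
         */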
8093         for (func = 0; func < 8; func++) {
8094                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8095                 if (peer && peer != tp->pdev)
8096                         break;
8097                 pci_dev_put(peer);
8098         }
8099         if (!peer || peer == tp->pdev)
8100                 BUG();
8101
8102         /*
8103          * We don't need to keep the refcount elevated; there's no way
8104          * to remove one half of this device without removing the other.
8105          */
8106         pci_dev_put(peer);
8107
8108         return peer;
8109 }
8110
8111 static int __devinit tg3_init_one(struct pci_dev *pdev,
8112                                   const struct pci_device_id *ent)
8113 {
8114         static int tg3_version_printed = 0;
8115         unsigned long tg3reg_base, tg3reg_len;
8116         struct net_device *dev;
8117         struct tg3 *tp;
8118         int i, err, pci_using_dac, pm_cap;
8119
8120         if (tg3_version_printed++ == 0)
8121                 printk(KERN_INFO "%s", version);
8122
8123         err = pci_enable_device(pdev);
8124         if (err) {
8125                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8126                        "aborting.\n");
8127                 return err;
8128         }
8129
8130         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8131                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8132                        "base address, aborting.\n");
8133                 err = -ENODEV;
8134                 goto err_out_disable_pdev;
8135         }
8136
8137         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8138         if (err) {
8139                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8140                        "aborting.\n");
8141                 goto err_out_disable_pdev;
8142         }
8143
8144         pci_set_master(pdev);
8145
8146         /* Find power-management capability. */
8147         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8148         if (pm_cap == 0) {
8149                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8150                        "aborting.\n");
8151                 err = -EIO;
8152                 goto err_out_free_res;
8153         }
8154
8155         /* Configure DMA attributes. */
8156         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8157         if (!err) {
8158                 pci_using_dac = 1;
8159                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8160                 if (err < 0) {
8161                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8162                                "for consistent allocations\n");
8163                         goto err_out_free_res;
8164                 }
8165         } else {
8166                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8167                 if (err) {
8168                         printk(KERN_ERR PFX "No usable DMA configuration, "
8169                                "aborting.\n");
8170                         goto err_out_free_res;
8171                 }
8172                 pci_using_dac = 0;
8173         }
8174
8175         tg3reg_base = pci_resource_start(pdev, 0);
8176         tg3reg_len = pci_resource_len(pdev, 0);
8177
8178         dev = alloc_etherdev(sizeof(*tp));
8179         if (!dev) {
8180                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8181                 err = -ENOMEM;
8182                 goto err_out_free_res;
8183         }
8184
8185         SET_MODULE_OWNER(dev);
8186         SET_NETDEV_DEV(dev, &pdev->dev);
8187
8188         if (pci_using_dac)
8189                 dev->features |= NETIF_F_HIGHDMA;
8190         dev->features |= NETIF_F_LLTX;
8191 #if TG3_VLAN_TAG_USED
8192         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8193         dev->vlan_rx_register = tg3_vlan_rx_register;
8194         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8195 #endif
8196
8197         tp = netdev_priv(dev);
8198         tp->pdev = pdev;
8199         tp->dev = dev;
8200         tp->pm_cap = pm_cap;
8201         tp->mac_mode = TG3_DEF_MAC_MODE;
8202         tp->rx_mode = TG3_DEF_RX_MODE;
8203         tp->tx_mode = TG3_DEF_TX_MODE;
8204         tp->mi_mode = MAC_MI_MODE_BASE;
8205         if (tg3_debug > 0)
8206                 tp->msg_enable = tg3_debug;
8207         else
8208                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8209
8210         /* The word/byte swap controls here control register access byte
8211          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8212          * setting below.
8213          */
8214         tp->misc_host_ctrl =
8215                 MISC_HOST_CTRL_MASK_PCI_INT |
8216                 MISC_HOST_CTRL_WORD_SWAP |
8217                 MISC_HOST_CTRL_INDIR_ACCESS |
8218                 MISC_HOST_CTRL_PCISTATE_RW;
8219
8220         /* The NONFRM (non-frame) byte/word swap controls take effect
8221          * on descriptor entries, anything which isn't packet data.
8222          *
8223          * The StrongARM chips on the board (one for tx, one for rx)
8224          * are running in big-endian mode.
8225          */
8226         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8227                         GRC_MODE_WSWAP_NONFRM_DATA);
8228 #ifdef __BIG_ENDIAN
8229         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8230 #endif
8231         spin_lock_init(&tp->lock);
8232         spin_lock_init(&tp->tx_lock);
8233         spin_lock_init(&tp->indirect_lock);
8234         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8235
8236         tp->regs = ioremap(tg3reg_base, tg3reg_len);
8237         if (tp->regs == 0UL) {
8238                 printk(KERN_ERR PFX "Cannot map device registers, "
8239                        "aborting.\n");
8240                 err = -ENOMEM;
8241                 goto err_out_free_dev;
8242         }
8243
8244         tg3_init_link_config(tp);
8245
8246         tg3_init_bufmgr_config(tp);
8247
8248         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8249         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8250         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8251
8252         dev->open = tg3_open;
8253         dev->stop = tg3_close;
8254         dev->get_stats = tg3_get_stats;
8255         dev->set_multicast_list = tg3_set_rx_mode;
8256         dev->set_mac_address = tg3_set_mac_addr;
8257         dev->do_ioctl = tg3_ioctl;
8258         dev->tx_timeout = tg3_tx_timeout;
8259         dev->poll = tg3_poll;
8260         dev->ethtool_ops = &tg3_ethtool_ops;
8261         dev->weight = 64;
8262         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8263         dev->change_mtu = tg3_change_mtu;
8264         dev->irq = pdev->irq;
8265 #ifdef CONFIG_NET_POLL_CONTROLLER
8266         dev->poll_controller = tg3_poll_controller;
8267 #endif
8268
8269         err = tg3_get_invariants(tp);
8270         if (err) {
8271                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8272                        "aborting.\n");
8273                 goto err_out_iounmap;
8274         }
8275
8276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8277             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8278                 tp->bufmgr_config.mbuf_read_dma_low_water =
8279                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8280                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8281                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8282                 tp->bufmgr_config.mbuf_high_water =
8283                         DEFAULT_MB_HIGH_WATER_5705;
8284         }
8285
8286 #if TG3_TSO_SUPPORT != 0
8287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8289             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8290             ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8291              GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8292                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8293         } else {
8294                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8295         }
8296
8297         /* TSO is off by default; the user can enable it using ethtool.  */
8298 #if 0
8299         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8300                 dev->features |= NETIF_F_TSO;
8301 #endif
8302
8303 #endif
8304
8305         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8306             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8307             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8308                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8309                 tp->rx_pending = 63;
8310         }
8311
8312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8313                 tp->pdev_peer = tg3_find_5704_peer(tp);
8314
8315         err = tg3_get_device_address(tp);
8316         if (err) {
8317                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8318                        "aborting.\n");
8319                 goto err_out_iounmap;
8320         }
8321
8322         /*
8323          * Reset the chip in case the UNDI or EFI driver did not shut it
8324          * down cleanly.  Otherwise the DMA self test will enable WDMAC
8325          * and we'll see (spurious) pending DMA on the PCI bus at that point.
8326          */
8327         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8328             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8329                 pci_save_state(tp->pdev, tp->pci_cfg_state);
8330                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8331                 tg3_halt(tp);
8332         }
8333
8334         err = tg3_test_dma(tp);
8335         if (err) {
8336                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8337                 goto err_out_iounmap;
8338         }
8339
8340         /* Tigon3 can do IPv4 checksumming only... and some chips have buggy
8341          * checksumming.
8342          */
8343         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8344                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8345                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8346         } else
8347                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8348
8349         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8350                 dev->features &= ~NETIF_F_HIGHDMA;
8351
8352         /* flow control autonegotiation is default behavior */
8353         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8354
8355         err = register_netdev(dev);
8356         if (err) {
8357                 printk(KERN_ERR PFX "Cannot register net device, "
8358                        "aborting.\n");
8359                 goto err_out_iounmap;
8360         }
8361
8362         pci_set_drvdata(pdev, dev);
8363
8364         /* Now that we have fully setup the chip, save away a snapshot
8365          * of the PCI config space.  We need to restore this after
8366          * GRC_MISC_CFG core clock resets and some resume events.
8367          */
8368         pci_save_state(tp->pdev, tp->pci_cfg_state);
8369
8370         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8371                dev->name,
8372                tp->board_part_number,
8373                tp->pci_chip_rev_id,
8374                tg3_phy_string(tp),
8375                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8376                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8377                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8378                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8379                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8380                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8381
8382         for (i = 0; i < 6; i++)
8383                 printk("%2.2x%c", dev->dev_addr[i],
8384                        i == 5 ? '\n' : ':');
8385
8386         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8387                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8388                "TSOcap[%d] \n",
8389                dev->name,
8390                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8391                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8392                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8393                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8394                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8395                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8396                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8397
8398         return 0;
8399
8400 err_out_iounmap:
8401         iounmap(tp->regs);
8402
8403 err_out_free_dev:
8404         free_netdev(dev);
8405
8406 err_out_free_res:
8407         pci_release_regions(pdev);
8408
8409 err_out_disable_pdev:
8410         pci_disable_device(pdev);
8411         pci_set_drvdata(pdev, NULL);
8412         return err;
8413 }
8414
8415 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8416 {
8417         struct net_device *dev = pci_get_drvdata(pdev);
8418
8419         if (dev) {
8420                 struct tg3 *tp = netdev_priv(dev);
8421
8422                 unregister_netdev(dev);
8423                 iounmap(tp->regs);
8424                 free_netdev(dev);
8425                 pci_release_regions(pdev);
8426                 pci_disable_device(pdev);
8427                 pci_set_drvdata(pdev, NULL);
8428         }
8429 }
8430
8431 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8432 {
8433         struct net_device *dev = pci_get_drvdata(pdev);
8434         struct tg3 *tp = netdev_priv(dev);
8435         int err;
8436
8437         if (!netif_running(dev))
8438                 return 0;
8439
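        /* Quiesce the device: stop the data path and timer, disable
         * interrupts, detach the interface, and halt the chip before
         * changing the power state.
         */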
8440         tg3_netif_stop(tp);
8441
8442         del_timer_sync(&tp->timer);
8443
8444         spin_lock_irq(&tp->lock);
8445         spin_lock(&tp->tx_lock);
8446         tg3_disable_ints(tp);
8447         spin_unlock(&tp->tx_lock);
8448         spin_unlock_irq(&tp->lock);
8449
8450         netif_device_detach(dev);
8451
8452         spin_lock_irq(&tp->lock);
8453         spin_lock(&tp->tx_lock);
8454         tg3_halt(tp);
8455         spin_unlock(&tp->tx_lock);
8456         spin_unlock_irq(&tp->lock);
8457
8458         err = tg3_set_power_state(tp, state);
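        /* If the power state change failed, bring the chip back up and
         * reattach the interface so the device stays usable.
         */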
8459         if (err) {
8460                 spin_lock_irq(&tp->lock);
8461                 spin_lock(&tp->tx_lock);
8462
8463                 tg3_init_hw(tp);
8464
8465                 tp->timer.expires = jiffies + tp->timer_offset;
8466                 add_timer(&tp->timer);
8467
8468                 netif_device_attach(dev);
8469                 tg3_netif_start(tp);
8470
8471                 spin_unlock(&tp->tx_lock);
8472                 spin_unlock_irq(&tp->lock);
8473         }
8474
8475         return err;
8476 }
8477
8478 static int tg3_resume(struct pci_dev *pdev)
8479 {
8480         struct net_device *dev = pci_get_drvdata(pdev);
8481         struct tg3 *tp = netdev_priv(dev);
8482         int err;
8483
8484         if (!netif_running(dev))
8485                 return 0;
8486
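        /* Restore PCI config space and full power, then reinitialize the
         * hardware and restart the data path.
         */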
8487         pci_restore_state(tp->pdev, tp->pci_cfg_state);
8488
8489         err = tg3_set_power_state(tp, 0);
8490         if (err)
8491                 return err;
8492
8493         netif_device_attach(dev);
8494
8495         spin_lock_irq(&tp->lock);
8496         spin_lock(&tp->tx_lock);
8497
8498         tg3_init_hw(tp);
8499
8500         tp->timer.expires = jiffies + tp->timer_offset;
8501         add_timer(&tp->timer);
8502
8503         tg3_enable_ints(tp);
8504
8505         tg3_netif_start(tp);
8506
8507         spin_unlock(&tp->tx_lock);
8508         spin_unlock_irq(&tp->lock);
8509
8510         return 0;
8511 }
8512
8513 static struct pci_driver tg3_driver = {
8514         .name           = DRV_MODULE_NAME,
8515         .id_table       = tg3_pci_tbl,
8516         .probe          = tg3_init_one,
8517         .remove         = __devexit_p(tg3_remove_one),
8518         .suspend        = tg3_suspend,
8519         .resume         = tg3_resume
8520 };
8521
8522 static int __init tg3_init(void)
8523 {
8524         return pci_module_init(&tg3_driver);
8525 }
8526
8527 static void __exit tg3_cleanup(void)
8528 {
8529         pci_unregister_driver(&tg3_driver);
8530 }
8531
8532 module_init(tg3_init);
8533 module_exit(tg3_cleanup);