[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  */
7
8 #include <linux/config.h>
9
10 #include <linux/module.h>
11
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/compiler.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/ioport.h>
19 #include <linux/pci.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/mii.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ip.h>
27 #include <linux/tcp.h>
28 #include <linux/workqueue.h>
29
30 #include <net/checksum.h>
31
32 #include <asm/system.h>
33 #include <asm/io.h>
34 #include <asm/byteorder.h>
35 #include <asm/uaccess.h>
36
37 #ifdef CONFIG_SPARC64
38 #include <asm/idprom.h>
39 #include <asm/oplib.h>
40 #include <asm/pbm.h>
41 #endif
42
43 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
44 #define TG3_VLAN_TAG_USED 1
45 #else
46 #define TG3_VLAN_TAG_USED 0
47 #endif
48
49 #ifdef NETIF_F_TSO
50 #define TG3_TSO_SUPPORT 1
51 #else
52 #define TG3_TSO_SUPPORT 0
53 #endif
54
55 #include "tg3.h"
56
57 #define DRV_MODULE_NAME         "tg3"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "3.6"
60 #define DRV_MODULE_RELDATE      "June 12, 2004"
61
62 #define TG3_DEF_MAC_MODE        0
63 #define TG3_DEF_RX_MODE         0
64 #define TG3_DEF_TX_MODE         0
65 #define TG3_DEF_MSG_ENABLE        \
66         (NETIF_MSG_DRV          | \
67          NETIF_MSG_PROBE        | \
68          NETIF_MSG_LINK         | \
69          NETIF_MSG_TIMER        | \
70          NETIF_MSG_IFDOWN       | \
71          NETIF_MSG_IFUP         | \
72          NETIF_MSG_RX_ERR       | \
73          NETIF_MSG_TX_ERR)
74
75 /* length of time before we decide the hardware is borked,
76  * and dev->tx_timeout() should be called to fix the problem
77  */
78 #define TG3_TX_TIMEOUT                  (5 * HZ)
79
80 /* hardware minimum and maximum for a single frame's data payload */
81 #define TG3_MIN_MTU                     60
82 #define TG3_MAX_MTU(tp) \
83         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
84           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
85
86 /* These numbers seem to be hard coded in the NIC firmware somehow.
87  * You can't change the ring sizes, but you can change where you place
88  * them in the NIC onboard memory.
89  */
90 #define TG3_RX_RING_SIZE                512
91 #define TG3_DEF_RX_RING_PENDING         200
92 #define TG3_RX_JUMBO_RING_SIZE          256
93 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
94
95 /* Do not place this n-ring entries value into the tp struct itself;
96  * we really want to expose these constants to GCC so that modulo et
97  * al. operations are done with shifts and masks instead of with hw
98  * multiply/modulo instructions.  Another solution would be to replace
99  * things like '% foo' with '& (foo - 1)'; see the example below NEXT_TX.
100  */
101 #define TG3_RX_RCB_RING_SIZE(tp)        \
102         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
103           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
104          512 : 1024)
105
106 #define TG3_TX_RING_SIZE                512
107 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
108
109 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
110                                  TG3_RX_RING_SIZE)
111 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_JUMBO_RING_SIZE)
113 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
114                                    TG3_RX_RCB_RING_SIZE(tp))
115 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
116                                  TG3_TX_RING_SIZE)
117 #define TX_RING_GAP(TP) \
118         (TG3_TX_RING_SIZE - (TP)->tx_pending)
119 #define TX_BUFFS_AVAIL(TP)                                              \
120         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
121           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
122           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
123 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
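/* Example of the mask trick above: with TG3_TX_RING_SIZE == 512 (a power
 * of two), NEXT_TX(511) == (512 & 511) == 0, i.e. the producer index wraps
 * back to the start of the ring without a hardware divide.  Similarly,
 * with tx_pending == 511, tx_cons == 5 and tx_prod == 10, TX_BUFFS_AVAIL()
 * evaluates to 5 + 511 - 10 == 506 free descriptors.
 */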
124
125 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
126 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
127
128 /* minimum number of free TX descriptors required to wake up TX process */
129 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
130
131 /* number of ETHTOOL_GSTATS u64's */
132 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
133
134 static char version[] __devinitdata =
135         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
136
137 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
138 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
139 MODULE_LICENSE("GPL");
140 MODULE_PARM(tg3_debug, "i");
141 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144
145 static struct pci_device_id tg3_pci_tbl[] = {
146         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
147           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
148         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
149           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
150         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
151           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
152         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
153           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { 0, }
219 };
220
221 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
222
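/* String keys reported for ETHTOOL_GSTATS.  TG3_NUM_STATS is derived from
 * struct tg3_ethtool_stats, so these names are expected to follow the same
 * order as the u64 counters in that structure.
 */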
223 static struct {
224         char string[ETH_GSTRING_LEN];
225 } ethtool_stats_keys[TG3_NUM_STATS] = {
226         { "rx_octets" },
227         { "rx_fragments" },
228         { "rx_ucast_packets" },
229         { "rx_mcast_packets" },
230         { "rx_bcast_packets" },
231         { "rx_fcs_errors" },
232         { "rx_align_errors" },
233         { "rx_xon_pause_rcvd" },
234         { "rx_xoff_pause_rcvd" },
235         { "rx_mac_ctrl_rcvd" },
236         { "rx_xoff_entered" },
237         { "rx_frame_too_long_errors" },
238         { "rx_jabbers" },
239         { "rx_undersize_packets" },
240         { "rx_in_length_errors" },
241         { "rx_out_length_errors" },
242         { "rx_64_or_less_octet_packets" },
243         { "rx_65_to_127_octet_packets" },
244         { "rx_128_to_255_octet_packets" },
245         { "rx_256_to_511_octet_packets" },
246         { "rx_512_to_1023_octet_packets" },
247         { "rx_1024_to_1522_octet_packets" },
248         { "rx_1523_to_2047_octet_packets" },
249         { "rx_2048_to_4095_octet_packets" },
250         { "rx_4096_to_8191_octet_packets" },
251         { "rx_8192_to_9022_octet_packets" },
252
253         { "tx_octets" },
254         { "tx_collisions" },
255
256         { "tx_xon_sent" },
257         { "tx_xoff_sent" },
258         { "tx_flow_control" },
259         { "tx_mac_errors" },
260         { "tx_single_collisions" },
261         { "tx_mult_collisions" },
262         { "tx_deferred" },
263         { "tx_excessive_collisions" },
264         { "tx_late_collisions" },
265         { "tx_collide_2times" },
266         { "tx_collide_3times" },
267         { "tx_collide_4times" },
268         { "tx_collide_5times" },
269         { "tx_collide_6times" },
270         { "tx_collide_7times" },
271         { "tx_collide_8times" },
272         { "tx_collide_9times" },
273         { "tx_collide_10times" },
274         { "tx_collide_11times" },
275         { "tx_collide_12times" },
276         { "tx_collide_13times" },
277         { "tx_collide_14times" },
278         { "tx_collide_15times" },
279         { "tx_ucast_packets" },
280         { "tx_mcast_packets" },
281         { "tx_bcast_packets" },
282         { "tx_carrier_sense_errors" },
283         { "tx_discards" },
284         { "tx_errors" },
285
286         { "dma_writeq_full" },
287         { "dma_write_prioq_full" },
288         { "rxbds_empty" },
289         { "rx_discards" },
290         { "rx_errors" },
291         { "rx_threshold_hit" },
292
293         { "dma_readq_full" },
294         { "dma_read_prioq_full" },
295         { "tx_comp_queue_full" },
296
297         { "ring_set_send_prod_index" },
298         { "ring_status_update" },
299         { "nic_irqs" },
300         { "nic_avoided_irqs" },
301         { "nic_tx_threshold_hit" }
302 };
303
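/* Register accessors.  When TG3_FLAG_PCIX_TARGET_HWBUG is set, register
 * writes go through the PCI config space window (TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA) serialized by indirect_lock instead of the memory
 * mapped BAR; otherwise a plain writel() is used, with an extra readl()
 * to flush on chips with the 5701 register write bug.  The tw32()/
 * tw32_f()/tr32() macros below wrap these helpers.
 */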
304 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
305 {
306         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
307                 unsigned long flags;
308
309                 spin_lock_irqsave(&tp->indirect_lock, flags);
310                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
311                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
312                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
313         } else {
314                 writel(val, tp->regs + off);
315                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
316                         readl(tp->regs + off);
317         }
318 }
319
320 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
321 {
322         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
323                 unsigned long flags;
324
325                 spin_lock_irqsave(&tp->indirect_lock, flags);
326                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
327                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
328                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
329         } else {
330                 unsigned long dest = tp->regs + off;
331                 writel(val, dest);
332                 readl(dest);    /* always flush PCI write */
333         }
334 }
335
336 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
337 {
338         unsigned long mbox = tp->regs + off;
339         writel(val, mbox);
340         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
341                 readl(mbox);
342 }
343
344 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
345 {
346         unsigned long mbox = tp->regs + off;
347         writel(val, mbox);
348         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
349                 writel(val, mbox);
350         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
351                 readl(mbox);
352 }
353
354 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
355 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
356 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
357
358 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
359 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
360 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
361 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
362 #define tr32(reg)               readl(tp->regs + (reg))
363 #define tr16(reg)               readw(tp->regs + (reg))
364 #define tr8(reg)                readb(tp->regs + (reg))
365
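/* NIC-internal memory is reached through a similar config space window
 * pair (TG3PCI_MEM_WIN_BASE_ADDR / TG3PCI_MEM_WIN_DATA), again under
 * indirect_lock; the window base is always returned to zero afterwards.
 */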
366 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
367 {
368         unsigned long flags;
369
370         spin_lock_irqsave(&tp->indirect_lock, flags);
371         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
372         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
373
374         /* Always leave this as zero. */
375         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
376         spin_unlock_irqrestore(&tp->indirect_lock, flags);
377 }
378
379 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
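/* Interrupts are masked by setting MISC_HOST_CTRL_MASK_PCI_INT and writing
 * a non-zero value to the interrupt mailbox; tg3_enable_ints() reverses
 * both and, via tg3_cond_int(), forces an interrupt if a status block
 * update is already pending.
 */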
392 static void tg3_disable_ints(struct tg3 *tp)
393 {
394         tw32(TG3PCI_MISC_HOST_CTRL,
395              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
396         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
397         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
398 }
399
400 static inline void tg3_cond_int(struct tg3 *tp)
401 {
402         if (tp->hw_status->status & SD_STATUS_UPDATED)
403                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
404 }
405
406 static void tg3_enable_ints(struct tg3 *tp)
407 {
408         tw32(TG3PCI_MISC_HOST_CTRL,
409              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
410         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
411         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
412
413         tg3_cond_int(tp);
414 }
415
416 static inline void tg3_netif_stop(struct tg3 *tp)
417 {
418         netif_poll_disable(tp->dev);
419         netif_tx_disable(tp->dev);
420 }
421
422 static inline void tg3_netif_start(struct tg3 *tp)
423 {
424         netif_wake_queue(tp->dev);
425         /* NOTE: unconditional netif_wake_queue is only appropriate
426          * so long as all callers are assured to have free tx slots
427          * (such as after tg3_init_hw)
428          */
429         netif_poll_enable(tp->dev);
430         tg3_cond_int(tp);
431 }
432
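/* Step the chip off the 44MHz alternate core clock (when one was in use
 * and the chip is not a 5705/5750), keeping only the CLKRUN-related bits
 * and low clock field of TG3PCI_CLOCK_CTRL cached in tp->pci_clock_ctrl.
 */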
433 static void tg3_switch_clocks(struct tg3 *tp)
434 {
435         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
436         u32 orig_clock_ctrl;
437
438         orig_clock_ctrl = clock_ctrl;
439         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
440                        CLOCK_CTRL_CLKRUN_OENABLE |
441                        0x1f);
442         tp->pci_clock_ctrl = clock_ctrl;
443
444         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
445             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
446             (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
447                 tw32_f(TG3PCI_CLOCK_CTRL,
448                      clock_ctrl |
449                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
450                 udelay(40);
451                 tw32_f(TG3PCI_CLOCK_CTRL,
452                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
453                 udelay(40);
454         }
455         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
456         udelay(40);
457 }
458
459 #define PHY_BUSY_LOOPS  5000
460
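/* MII management (MDIO) access to the PHY goes through MAC_MI_COM: build
 * a frame with the PHY and register address, start the transaction, then
 * poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.  Autopolling is
 * temporarily disabled around the access.
 */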
461 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
462 {
463         u32 frame_val;
464         int loops, ret;
465
466         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
467                 tw32_f(MAC_MI_MODE,
468                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
469                 udelay(80);
470         }
471
472         *val = 0xffffffff;
473
474         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
475                       MI_COM_PHY_ADDR_MASK);
476         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
477                       MI_COM_REG_ADDR_MASK);
478         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
479         
480         tw32_f(MAC_MI_COM, frame_val);
481
482         loops = PHY_BUSY_LOOPS;
483         while (loops-- > 0) {
484                 udelay(10);
485                 frame_val = tr32(MAC_MI_COM);
486
487                 if ((frame_val & MI_COM_BUSY) == 0) {
488                         udelay(5);
489                         frame_val = tr32(MAC_MI_COM);
490                         break;
491                 }
492         }
493
494         ret = -EBUSY;
495         if (loops > 0) {
496                 *val = frame_val & MI_COM_DATA_MASK;
497                 ret = 0;
498         }
499
500         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
501                 tw32_f(MAC_MI_MODE, tp->mi_mode);
502                 udelay(80);
503         }
504
505         return ret;
506 }
507
508 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
509 {
510         u32 frame_val;
511         int loops, ret;
512
513         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
514                 tw32_f(MAC_MI_MODE,
515                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
516                 udelay(80);
517         }
518
519         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
520                       MI_COM_PHY_ADDR_MASK);
521         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
522                       MI_COM_REG_ADDR_MASK);
523         frame_val |= (val & MI_COM_DATA_MASK);
524         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
525         
526         tw32_f(MAC_MI_COM, frame_val);
527
528         loops = PHY_BUSY_LOOPS;
529         while (loops-- > 0) {
530                 udelay(10);
531                 frame_val = tr32(MAC_MI_COM);
532                 if ((frame_val & MI_COM_BUSY) == 0) {
533                         udelay(5);
534                         frame_val = tr32(MAC_MI_COM);
535                         break;
536                 }
537         }
538
539         ret = -EBUSY;
540         if (loops > 0)
541                 ret = 0;
542
543         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
544                 tw32_f(MAC_MI_MODE, tp->mi_mode);
545                 udelay(80);
546         }
547
548         return ret;
549 }
550
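/* Turn on the Broadcom "Ethernet@WireSpeed" feature via the vendor
 * specific AUX_CTRL register, unless TG3_FLG2_NO_ETH_WIRE_SPEED is set.
 */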
551 static void tg3_phy_set_wirespeed(struct tg3 *tp)
552 {
553         u32 val;
554
555         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
556                 return;
557
558         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
559         tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
560         tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
561 }
562
563 static int tg3_bmcr_reset(struct tg3 *tp)
564 {
565         u32 phy_control;
566         int limit, err;
567
568         /* OK, reset it, and poll the BMCR_RESET bit until it
569          * clears or we time out.
570          */
571         phy_control = BMCR_RESET;
572         err = tg3_writephy(tp, MII_BMCR, phy_control);
573         if (err != 0)
574                 return -EBUSY;
575
576         limit = 5000;
577         while (limit--) {
578                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
579                 if (err != 0)
580                         return -EBUSY;
581
582                 if ((phy_control & BMCR_RESET) == 0) {
583                         udelay(40);
584                         break;
585                 }
586                 udelay(10);
587         }
588         if (limit <= 0)
589                 return -EBUSY;
590
591         return 0;
592 }
593
594 static int tg3_wait_macro_done(struct tg3 *tp)
595 {
596         int limit = 100;
597
598         while (limit--) {
599                 u32 tmp32;
600
601                 tg3_readphy(tp, 0x16, &tmp32);
602                 if ((tmp32 & 0x1000) == 0)
603                         break;
604         }
605         if (limit <= 0)
606                 return -EBUSY;
607
608         return 0;
609 }
610
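/* Write a known test pattern into each of the four DSP channels and read
 * it back.  On a macro-busy timeout *resetp is set so the caller retries
 * after a fresh PHY reset; any failure returns -EBUSY.  Part of the
 * 5703/5704/5705 PHY reset workaround below.
 */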
611 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
612 {
613         static const u32 test_pat[4][6] = {
614         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
615         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
616         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
617         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
618         };
619         int chan;
620
621         for (chan = 0; chan < 4; chan++) {
622                 int i;
623
624                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
625                              (chan * 0x2000) | 0x0200);
626                 tg3_writephy(tp, 0x16, 0x0002);
627
628                 for (i = 0; i < 6; i++)
629                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
630                                      test_pat[chan][i]);
631
632                 tg3_writephy(tp, 0x16, 0x0202);
633                 if (tg3_wait_macro_done(tp)) {
634                         *resetp = 1;
635                         return -EBUSY;
636                 }
637
638                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
639                              (chan * 0x2000) | 0x0200);
640                 tg3_writephy(tp, 0x16, 0x0082);
641                 if (tg3_wait_macro_done(tp)) {
642                         *resetp = 1;
643                         return -EBUSY;
644                 }
645
646                 tg3_writephy(tp, 0x16, 0x0802);
647                 if (tg3_wait_macro_done(tp)) {
648                         *resetp = 1;
649                         return -EBUSY;
650                 }
651
652                 for (i = 0; i < 6; i += 2) {
653                         u32 low, high;
654
655                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
656                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
657                         if (tg3_wait_macro_done(tp)) {
658                                 *resetp = 1;
659                                 return -EBUSY;
660                         }
661                         low &= 0x7fff;
662                         high &= 0x000f;
663                         if (low != test_pat[chan][i] ||
664                             high != test_pat[chan][i+1]) {
665                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
666                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
667                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
668
669                                 return -EBUSY;
670                         }
671                 }
672         }
673
674         return 0;
675 }
676
677 static int tg3_phy_reset_chanpat(struct tg3 *tp)
678 {
679         int chan;
680
681         for (chan = 0; chan < 4; chan++) {
682                 int i;
683
684                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
685                              (chan * 0x2000) | 0x0200);
686                 tg3_writephy(tp, 0x16, 0x0002);
687                 for (i = 0; i < 6; i++)
688                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
689                 tg3_writephy(tp, 0x16, 0x0202);
690                 if (tg3_wait_macro_done(tp))
691                         return -EBUSY;
692         }
693
694         return 0;
695 }
696
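/* Workaround sequence for 5703/5704/5705 PHYs: force 1000/full master
 * mode, verify the DSP channels with the test pattern above (retrying
 * with a BMCR reset, up to 10 times), then restore the original PHY
 * settings.
 */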
697 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
698 {
699         u32 reg32, phy9_orig;
700         int retries, do_phy_reset, err;
701
702         retries = 10;
703         do_phy_reset = 1;
704         do {
705                 if (do_phy_reset) {
706                         err = tg3_bmcr_reset(tp);
707                         if (err)
708                                 return err;
709                         do_phy_reset = 0;
710                 }
711
712                 /* Disable transmitter and interrupt.  */
713                 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
714                 reg32 |= 0x3000;
715                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
716
717                 /* Set full-duplex, 1000 mbps.  */
718                 tg3_writephy(tp, MII_BMCR,
719                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
720
721                 /* Set to master mode.  */
722                 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
723                 tg3_writephy(tp, MII_TG3_CTRL,
724                              (MII_TG3_CTRL_AS_MASTER |
725                               MII_TG3_CTRL_ENABLE_AS_MASTER));
726
727                 /* Enable SM_DSP_CLOCK and 6dB.  */
728                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
729
730                 /* Block the PHY control access.  */
731                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
732                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
733
734                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
735                 if (!err)
736                         break;
737         } while (--retries);
738
739         err = tg3_phy_reset_chanpat(tp);
740         if (err)
741                 return err;
742
743         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
744         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
745
746         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
747         tg3_writephy(tp, 0x16, 0x0000);
748
749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
750             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
751                 /* Set Extended packet length bit for jumbo frames */
752                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
753         }
754         else {
755                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
756         }
757
758         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
759
760         tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
761         reg32 &= ~0x3000;
762         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
763
764         return err;
765 }
766
767 /* This resets the tigon3 PHY.  Callers such as tg3_setup_copper_phy()
768  * decide whether a reset is needed, e.g. when the link has gone down.
769  */
770 static int tg3_phy_reset(struct tg3 *tp)
771 {
772         u32 phy_status;
773         int err;
774
775         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
776         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
777         if (err != 0)
778                 return -EBUSY;
779
780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
781             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
782             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
783                 err = tg3_phy_reset_5703_4_5(tp);
784                 if (err)
785                         return err;
786                 goto out;
787         }
788
789         err = tg3_bmcr_reset(tp);
790         if (err)
791                 return err;
792
793 out:
794         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
795                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
796                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
797                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
798                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
799                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
800                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
801         }
802         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
803                 tg3_writephy(tp, 0x1c, 0x8d68);
804                 tg3_writephy(tp, 0x1c, 0x8d68);
805         }
806         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
807                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
808                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
809                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
810                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
811                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
812                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
813                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
814                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
815         }
816         /* Set Extended packet length bit (bit 14) on all chips that
817          * support jumbo frames */
818         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
819                 /* Cannot do read-modify-write on 5401 */
820                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
821         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
822                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
823                 u32 phy_reg;
824
825                 /* Set bit 14 with read-modify-write to preserve other bits */
826                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
827                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
828                 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
829         }
830         tg3_phy_set_wirespeed(tp);
831         return 0;
832 }
833
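/* Drive the GPIOs in GRC_LOCAL_CTRL that control auxiliary power.  On the
 * dual-port 5704 the peer device's WOL/init state is consulted first so
 * the pins (which appear to be shared) are only toggled by one function.
 */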
834 static void tg3_frob_aux_power(struct tg3 *tp)
835 {
836         struct tg3 *tp_peer = tp;
837
838         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
839                 return;
840
841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
842                 tp_peer = pci_get_drvdata(tp->pdev_peer);
843                 if (!tp_peer)
844                         BUG();
845         }
846
847
848         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
849             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
850                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
851                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
852                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
853                              (GRC_LCLCTRL_GPIO_OE0 |
854                               GRC_LCLCTRL_GPIO_OE1 |
855                               GRC_LCLCTRL_GPIO_OE2 |
856                               GRC_LCLCTRL_GPIO_OUTPUT0 |
857                               GRC_LCLCTRL_GPIO_OUTPUT1));
858                         udelay(100);
859                 } else {
860                         if (tp_peer != tp &&
861                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
862                                 return;
863
864                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
865                              (GRC_LCLCTRL_GPIO_OE0 |
866                               GRC_LCLCTRL_GPIO_OE1 |
867                               GRC_LCLCTRL_GPIO_OE2 |
868                               GRC_LCLCTRL_GPIO_OUTPUT1 |
869                               GRC_LCLCTRL_GPIO_OUTPUT2));
870                         udelay(100);
871
872                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
873                              (GRC_LCLCTRL_GPIO_OE0 |
874                               GRC_LCLCTRL_GPIO_OE1 |
875                               GRC_LCLCTRL_GPIO_OE2 |
876                               GRC_LCLCTRL_GPIO_OUTPUT0 |
877                               GRC_LCLCTRL_GPIO_OUTPUT1 |
878                               GRC_LCLCTRL_GPIO_OUTPUT2));
879                         udelay(100);
880
881                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
882                              (GRC_LCLCTRL_GPIO_OE0 |
883                               GRC_LCLCTRL_GPIO_OE1 |
884                               GRC_LCLCTRL_GPIO_OE2 |
885                               GRC_LCLCTRL_GPIO_OUTPUT0 |
886                               GRC_LCLCTRL_GPIO_OUTPUT1));
887                         udelay(100);
888                 }
889         } else {
890                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
891                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
892                         if (tp_peer != tp &&
893                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
894                                 return;
895
896                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
897                              (GRC_LCLCTRL_GPIO_OE1 |
898                               GRC_LCLCTRL_GPIO_OUTPUT1));
899                         udelay(100);
900
901                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
902                              (GRC_LCLCTRL_GPIO_OE1));
903                         udelay(100);
904
905                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
906                              (GRC_LCLCTRL_GPIO_OE1 |
907                               GRC_LCLCTRL_GPIO_OUTPUT1));
908                         udelay(100);
909                 }
910         }
911 }
912
913 static int tg3_setup_phy(struct tg3 *, int);
914
915 #define RESET_KIND_SHUTDOWN     0
916 #define RESET_KIND_INIT         1
917 #define RESET_KIND_SUSPEND      2
918
919 static void tg3_write_sig_post_reset(struct tg3 *, int);
920
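/* Move the device between PCI power states via the PM capability.  Before
 * entering a low-power state the PHY is dropped to 10/half, WOL magic
 * packet mode is armed if enabled, the core clocks are slowed and the
 * auxiliary power GPIOs are set up.
 */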
921 static int tg3_set_power_state(struct tg3 *tp, int state)
922 {
923         u32 misc_host_ctrl;
924         u16 power_control, power_caps;
925         int pm = tp->pm_cap;
926
927         /* Make sure register accesses (indirect or otherwise)
928          * will function correctly.
929          */
930         pci_write_config_dword(tp->pdev,
931                                TG3PCI_MISC_HOST_CTRL,
932                                tp->misc_host_ctrl);
933
934         pci_read_config_word(tp->pdev,
935                              pm + PCI_PM_CTRL,
936                              &power_control);
937         power_control |= PCI_PM_CTRL_PME_STATUS;
938         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
939         switch (state) {
940         case 0:
941                 power_control |= 0;
942                 pci_write_config_word(tp->pdev,
943                                       pm + PCI_PM_CTRL,
944                                       power_control);
945                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
946                 udelay(100);
947
948                 return 0;
949
950         case 1:
951                 power_control |= 1;
952                 break;
953
954         case 2:
955                 power_control |= 2;
956                 break;
957
958         case 3:
959                 power_control |= 3;
960                 break;
961
962         default:
963                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
964                        "requested.\n",
965                        tp->dev->name, state);
966                 return -EINVAL;
967         };
968
969         power_control |= PCI_PM_CTRL_PME_ENABLE;
970
971         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
972         tw32(TG3PCI_MISC_HOST_CTRL,
973              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
974
975         if (tp->link_config.phy_is_low_power == 0) {
976                 tp->link_config.phy_is_low_power = 1;
977                 tp->link_config.orig_speed = tp->link_config.speed;
978                 tp->link_config.orig_duplex = tp->link_config.duplex;
979                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
980         }
981
982         if (tp->phy_id != PHY_ID_SERDES) {
983                 tp->link_config.speed = SPEED_10;
984                 tp->link_config.duplex = DUPLEX_HALF;
985                 tp->link_config.autoneg = AUTONEG_ENABLE;
986                 tg3_setup_phy(tp, 0);
987         }
988
989         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
990
991         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
992                 u32 mac_mode;
993
994                 if (tp->phy_id != PHY_ID_SERDES) {
995                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
996                         udelay(40);
997
998                         mac_mode = MAC_MODE_PORT_MODE_MII;
999
1000                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1001                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1002                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1003                 } else {
1004                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1005                 }
1006
1007                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1008                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1009
1010                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1011                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1012                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1013
1014                 tw32_f(MAC_MODE, mac_mode);
1015                 udelay(100);
1016
1017                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1018                 udelay(10);
1019         }
1020
1021         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1022             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1023              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1024                 u32 base_val;
1025
1026                 base_val = tp->pci_clock_ctrl;
1027                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1028                              CLOCK_CTRL_TXCLK_DISABLE);
1029
1030                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1031                      CLOCK_CTRL_ALTCLK |
1032                      CLOCK_CTRL_PWRDOWN_PLL133);
1033                 udelay(40);
1034         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1035                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1036                 u32 newbits1, newbits2;
1037
1038                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1039                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1040                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1041                                     CLOCK_CTRL_TXCLK_DISABLE |
1042                                     CLOCK_CTRL_ALTCLK);
1043                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1044                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1045                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1046                         newbits1 = CLOCK_CTRL_625_CORE;
1047                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1048                 } else {
1049                         newbits1 = CLOCK_CTRL_ALTCLK;
1050                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1051                 }
1052
1053                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1054                 udelay(40);
1055
1056                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1057                 udelay(40);
1058
1059                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1060                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1061                         u32 newbits3;
1062
1063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1064                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1065                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1066                                             CLOCK_CTRL_TXCLK_DISABLE |
1067                                             CLOCK_CTRL_44MHZ_CORE);
1068                         } else {
1069                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1070                         }
1071
1072                         tw32_f(TG3PCI_CLOCK_CTRL,
1073                                          tp->pci_clock_ctrl | newbits3);
1074                         udelay(40);
1075                 }
1076         }
1077
1078         tg3_frob_aux_power(tp);
1079
1080         /* Finally, set the new power state. */
1081         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1082
1083         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1084
1085         return 0;
1086 }
1087
1088 static void tg3_link_report(struct tg3 *tp)
1089 {
1090         if (!netif_carrier_ok(tp->dev)) {
1091                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1092         } else {
1093                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1094                        tp->dev->name,
1095                        (tp->link_config.active_speed == SPEED_1000 ?
1096                         1000 :
1097                         (tp->link_config.active_speed == SPEED_100 ?
1098                          100 : 10)),
1099                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1100                         "full" : "half"));
1101
1102                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1103                        "%s for RX.\n",
1104                        tp->dev->name,
1105                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1106                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1107         }
1108 }
1109
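/* Resolve TX/RX flow control from the local and link-partner autoneg
 * advertisements (PAUSE / ASYM_PAUSE bits) and reprogram MAC_RX_MODE /
 * MAC_TX_MODE only if the result actually changed.
 */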
1110 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1111 {
1112         u32 new_tg3_flags = 0;
1113         u32 old_rx_mode = tp->rx_mode;
1114         u32 old_tx_mode = tp->tx_mode;
1115
1116         if (local_adv & ADVERTISE_PAUSE_CAP) {
1117                 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1118                         if (remote_adv & LPA_PAUSE_CAP)
1119                                 new_tg3_flags |=
1120                                         (TG3_FLAG_RX_PAUSE |
1121                                          TG3_FLAG_TX_PAUSE);
1122                         else if (remote_adv & LPA_PAUSE_ASYM)
1123                                 new_tg3_flags |=
1124                                         (TG3_FLAG_RX_PAUSE);
1125                 } else {
1126                         if (remote_adv & LPA_PAUSE_CAP)
1127                                 new_tg3_flags |=
1128                                         (TG3_FLAG_RX_PAUSE |
1129                                          TG3_FLAG_TX_PAUSE);
1130                 }
1131         } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1132                 if ((remote_adv & LPA_PAUSE_CAP) &&
1133                     (remote_adv & LPA_PAUSE_ASYM))
1134                         new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1135         }
1136
1137         tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1138         tp->tg3_flags |= new_tg3_flags;
1139
1140         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1141                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1142         else
1143                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1144
1145         if (old_rx_mode != tp->rx_mode) {
1146                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1147         }
1148         
1149         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1150                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1151         else
1152                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1153
1154         if (old_tx_mode != tp->tx_mode) {
1155                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1156         }
1157 }
1158
1159 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1160 {
1161         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1162         case MII_TG3_AUX_STAT_10HALF:
1163                 *speed = SPEED_10;
1164                 *duplex = DUPLEX_HALF;
1165                 break;
1166
1167         case MII_TG3_AUX_STAT_10FULL:
1168                 *speed = SPEED_10;
1169                 *duplex = DUPLEX_FULL;
1170                 break;
1171
1172         case MII_TG3_AUX_STAT_100HALF:
1173                 *speed = SPEED_100;
1174                 *duplex = DUPLEX_HALF;
1175                 break;
1176
1177         case MII_TG3_AUX_STAT_100FULL:
1178                 *speed = SPEED_100;
1179                 *duplex = DUPLEX_FULL;
1180                 break;
1181
1182         case MII_TG3_AUX_STAT_1000HALF:
1183                 *speed = SPEED_1000;
1184                 *duplex = DUPLEX_HALF;
1185                 break;
1186
1187         case MII_TG3_AUX_STAT_1000FULL:
1188                 *speed = SPEED_1000;
1189                 *duplex = DUPLEX_FULL;
1190                 break;
1191
1192         default:
1193                 *speed = SPEED_INVALID;
1194                 *duplex = DUPLEX_INVALID;
1195                 break;
1196         };
1197 }
1198
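/* Program the copper PHY advertisement registers from link_config: a
 * reduced advertisement in low-power mode, everything supported when
 * autonegotiating, or a fixed BMCR setting when a specific speed/duplex
 * has been forced.
 */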
1199 static int tg3_phy_copper_begin(struct tg3 *tp)
1200 {
1201         u32 new_adv;
1202         int i;
1203
1204         if (tp->link_config.phy_is_low_power) {
1205                 /* Entering low power mode.  Disable gigabit and
1206                  * 100baseT advertisements.
1207                  */
1208                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1209
1210                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1211                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1212                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1213                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1214
1215                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1216         } else if (tp->link_config.speed == SPEED_INVALID) {
1217                 tp->link_config.advertising =
1218                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1219                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1220                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1221                          ADVERTISED_Autoneg | ADVERTISED_MII);
1222
1223                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1224                         tp->link_config.advertising &=
1225                                 ~(ADVERTISED_1000baseT_Half |
1226                                   ADVERTISED_1000baseT_Full);
1227
1228                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1229                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1230                         new_adv |= ADVERTISE_10HALF;
1231                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1232                         new_adv |= ADVERTISE_10FULL;
1233                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1234                         new_adv |= ADVERTISE_100HALF;
1235                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1236                         new_adv |= ADVERTISE_100FULL;
1237                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1238
1239                 if (tp->link_config.advertising &
1240                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1241                         new_adv = 0;
1242                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1243                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1244                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1245                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1246                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1247                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1248                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1249                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1250                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1251                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1252                 } else {
1253                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1254                 }
1255         } else {
1256                 /* Asking for a specific link mode. */
1257                 if (tp->link_config.speed == SPEED_1000) {
1258                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1259                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1260
1261                         if (tp->link_config.duplex == DUPLEX_FULL)
1262                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1263                         else
1264                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1265                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1266                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1267                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1268                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1269                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1270                 } else {
1271                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1272
1273                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1274                         if (tp->link_config.speed == SPEED_100) {
1275                                 if (tp->link_config.duplex == DUPLEX_FULL)
1276                                         new_adv |= ADVERTISE_100FULL;
1277                                 else
1278                                         new_adv |= ADVERTISE_100HALF;
1279                         } else {
1280                                 if (tp->link_config.duplex == DUPLEX_FULL)
1281                                         new_adv |= ADVERTISE_10FULL;
1282                                 else
1283                                         new_adv |= ADVERTISE_10HALF;
1284                         }
1285                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1286                 }
1287         }
1288
1289         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1290             tp->link_config.speed != SPEED_INVALID) {
1291                 u32 bmcr, orig_bmcr;
1292
1293                 tp->link_config.active_speed = tp->link_config.speed;
1294                 tp->link_config.active_duplex = tp->link_config.duplex;
1295
1296                 bmcr = 0;
1297                 switch (tp->link_config.speed) {
1298                 default:
1299                 case SPEED_10:
1300                         break;
1301
1302                 case SPEED_100:
1303                         bmcr |= BMCR_SPEED100;
1304                         break;
1305
1306                 case SPEED_1000:
1307                         bmcr |= TG3_BMCR_SPEED1000;
1308                         break;
1309                 };
1310
1311                 if (tp->link_config.duplex == DUPLEX_FULL)
1312                         bmcr |= BMCR_FULLDPLX;
1313
1314                 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1315                 if (bmcr != orig_bmcr) {
1316                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1317                         for (i = 0; i < 1500; i++) {
1318                                 u32 tmp;
1319
1320                                 udelay(10);
1321                                 tg3_readphy(tp, MII_BMSR, &tmp);
1322                                 tg3_readphy(tp, MII_BMSR, &tmp);
1323                                 if (!(tmp & BMSR_LSTATUS)) {
1324                                         udelay(40);
1325                                         break;
1326                                 }
1327                         }
1328                         tg3_writephy(tp, MII_BMCR, bmcr);
1329                         udelay(40);
1330                 }
1331         } else {
1332                 tg3_writephy(tp, MII_BMCR,
1333                              BMCR_ANENABLE | BMCR_ANRESTART);
1334         }
1335
1336         return 0;
1337 }
1338
1339 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1340 {
1341         int err;
1342
1343         /* Turn off tap power management. */
1344         /* Set Extended packet length bit */
1345         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1346
1347         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1348         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1349
1350         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1351         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1352
1353         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1354         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1355
1356         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1357         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1358
1359         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1360         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1361
1362         udelay(40);
1363
1364         return err;
1365 }
1366
1367 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1368 {
1369         u32 adv_reg, all_mask;
1370
1371         tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1372         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1373                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1374         if ((adv_reg & all_mask) != all_mask)
1375                 return 0;
1376         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1377                 u32 tg3_ctrl;
1378
1379                 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1380                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1381                             MII_TG3_CTRL_ADV_1000_FULL);
1382                 if ((tg3_ctrl & all_mask) != all_mask)
1383                         return 0;
1384         }
1385         return 1;
1386 }
1387
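/* Link setup path for copper PHYs: clear stale MAC status and PHY
 * interrupt state, apply chip-specific PHY bug workarounds (forcing a
 * PHY reset when needed), then evaluate the negotiated link.
 */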
1388 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1389 {
1390         int current_link_up;
1391         u32 bmsr, dummy;
1392         u16 current_speed;
1393         u8 current_duplex;
1394         int i, err;
1395
1396         tw32(MAC_EVENT, 0);
1397
1398         tw32_f(MAC_STATUS,
1399              (MAC_STATUS_SYNC_CHANGED |
1400               MAC_STATUS_CFG_CHANGED |
1401               MAC_STATUS_MI_COMPLETION |
1402               MAC_STATUS_LNKSTATE_CHANGED));
1403         udelay(40);
1404
1405         tp->mi_mode = MAC_MI_MODE_BASE;
1406         tw32_f(MAC_MI_MODE, tp->mi_mode);
1407         udelay(80);
1408
1409         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1410
1411         /* Some third-party PHYs need to be reset on link going
1412          * down.
1413          */
1414         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1415              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1416              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1417             netif_carrier_ok(tp->dev)) {
1418                 tg3_readphy(tp, MII_BMSR, &bmsr);
1419                 tg3_readphy(tp, MII_BMSR, &bmsr);
1420                 if (!(bmsr & BMSR_LSTATUS))
1421                         force_reset = 1;
1422         }
1423         if (force_reset)
1424                 tg3_phy_reset(tp);
1425
1426         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1427                 tg3_readphy(tp, MII_BMSR, &bmsr);
1428                 tg3_readphy(tp, MII_BMSR, &bmsr);
1429
1430                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1431                         bmsr = 0;
1432
1433                 if (!(bmsr & BMSR_LSTATUS)) {
1434                         err = tg3_init_5401phy_dsp(tp);
1435                         if (err)
1436                                 return err;
1437
1438                         tg3_readphy(tp, MII_BMSR, &bmsr);
1439                         for (i = 0; i < 1000; i++) {
1440                                 udelay(10);
1441                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1442                                 if (bmsr & BMSR_LSTATUS) {
1443                                         udelay(40);
1444                                         break;
1445                                 }
1446                         }
1447
1448                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1449                             !(bmsr & BMSR_LSTATUS) &&
1450                             tp->link_config.active_speed == SPEED_1000) {
1451                                 err = tg3_phy_reset(tp);
1452                                 if (!err)
1453                                         err = tg3_init_5401phy_dsp(tp);
1454                                 if (err)
1455                                         return err;
1456                         }
1457                 }
1458         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1459                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1460                 /* 5701 {A0,B0} CRC bug workaround */
1461                 tg3_writephy(tp, 0x15, 0x0a75);
1462                 tg3_writephy(tp, 0x1c, 0x8c68);
1463                 tg3_writephy(tp, 0x1c, 0x8d68);
1464                 tg3_writephy(tp, 0x1c, 0x8c68);
1465         }
1466
1467         /* Clear pending interrupts... */
1468         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1469         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1470
1471         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1472                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1473         else
1474                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1475
1476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1477             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1478                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1479                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1480                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1481                 else
1482                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1483         }
1484
1485         current_link_up = 0;
1486         current_speed = SPEED_INVALID;
1487         current_duplex = DUPLEX_INVALID;
1488
1489         bmsr = 0;
1490         for (i = 0; i < 100; i++) {
1491                 tg3_readphy(tp, MII_BMSR, &bmsr);
1492                 tg3_readphy(tp, MII_BMSR, &bmsr);
1493                 if (bmsr & BMSR_LSTATUS)
1494                         break;
1495                 udelay(40);
1496         }
1497
1498         if (bmsr & BMSR_LSTATUS) {
1499                 u32 aux_stat, bmcr;
1500
1501                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1502                 for (i = 0; i < 2000; i++) {
1503                         udelay(10);
1504                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1505                         if (aux_stat)
1506                                 break;
1507                 }
1508
1509                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1510                                              &current_speed,
1511                                              &current_duplex);
1512
1513                 bmcr = 0;
1514                 for (i = 0; i < 200; i++) {
1515                         tg3_readphy(tp, MII_BMCR, &bmcr);
1516                         tg3_readphy(tp, MII_BMCR, &bmcr);
1517                         if (bmcr && bmcr != 0x7fff)
1518                                 break;
1519                         udelay(10);
1520                 }
1521
1522                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1523                         if (bmcr & BMCR_ANENABLE) {
1524                                 current_link_up = 1;
1525
1526                                 /* Force autoneg restart if we are exiting
1527                                  * low power mode.
1528                                  */
1529                                 if (!tg3_copper_is_advertising_all(tp))
1530                                         current_link_up = 0;
1531                         } else {
1532                                 current_link_up = 0;
1533                         }
1534                 } else {
1535                         if (!(bmcr & BMCR_ANENABLE) &&
1536                             tp->link_config.speed == current_speed &&
1537                             tp->link_config.duplex == current_duplex) {
1538                                 current_link_up = 1;
1539                         } else {
1540                                 current_link_up = 0;
1541                         }
1542                 }
1543
1544                 tp->link_config.active_speed = current_speed;
1545                 tp->link_config.active_duplex = current_duplex;
1546         }
1547
1548         if (current_link_up == 1 &&
1549             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1550             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1551                 u32 local_adv, remote_adv;
1552
1553                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1554                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1555
1556                 tg3_readphy(tp, MII_LPA, &remote_adv);
1557                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1558
1559                 /* If we are not advertising full pause capability,
1560                  * something is wrong.  Bring the link down and reconfigure.
1561                  */
1562                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1563                         current_link_up = 0;
1564                 } else {
1565                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1566                 }
1567         }
1568
1569         if (current_link_up == 0) {
1570                 u32 tmp;
1571
1572                 tg3_phy_copper_begin(tp);
1573
1574                 tg3_readphy(tp, MII_BMSR, &tmp);
1575                 tg3_readphy(tp, MII_BMSR, &tmp);
1576                 if (tmp & BMSR_LSTATUS)
1577                         current_link_up = 1;
1578         }
1579
1580         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1581         if (current_link_up == 1) {
1582                 if (tp->link_config.active_speed == SPEED_100 ||
1583                     tp->link_config.active_speed == SPEED_10)
1584                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1585                 else
1586                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1587         } else
1588                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1589
1590         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1591         if (tp->link_config.active_duplex == DUPLEX_HALF)
1592                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1593
1594         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1596                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1597                     (current_link_up == 1 &&
1598                      tp->link_config.active_speed == SPEED_10))
1599                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1600         } else {
1601                 if (current_link_up == 1)
1602                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1603         }
1604
1605         /* ??? Without this setting Netgear GA302T PHY does not
1606          * ??? send/receive packets...
1607          */
1608         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1609             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1610                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1611                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1612                 udelay(80);
1613         }
1614
1615         tw32_f(MAC_MODE, tp->mac_mode);
1616         udelay(40);
1617
1618         if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
1619                 /* Polled via timer. */
1620                 tw32_f(MAC_EVENT, 0);
1621         } else {
1622                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1623         }
1624         udelay(40);
1625
1626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1627             current_link_up == 1 &&
1628             tp->link_config.active_speed == SPEED_1000 &&
1629             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1630              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1631                 udelay(120);
1632                 tw32_f(MAC_STATUS,
1633                      (MAC_STATUS_SYNC_CHANGED |
1634                       MAC_STATUS_CFG_CHANGED));
1635                 udelay(40);
1636                 tg3_write_mem(tp,
1637                               NIC_SRAM_FIRMWARE_MBOX,
1638                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1639         }
1640
1641         if (current_link_up != netif_carrier_ok(tp->dev)) {
1642                 if (current_link_up)
1643                         netif_carrier_on(tp->dev);
1644                 else
1645                         netif_carrier_off(tp->dev);
1646                 tg3_link_report(tp);
1647         }
1648
1649         return 0;
1650 }
1651
1652 struct tg3_fiber_aneginfo {
1653         int state;
1654 #define ANEG_STATE_UNKNOWN              0
1655 #define ANEG_STATE_AN_ENABLE            1
1656 #define ANEG_STATE_RESTART_INIT         2
1657 #define ANEG_STATE_RESTART              3
1658 #define ANEG_STATE_DISABLE_LINK_OK      4
1659 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1660 #define ANEG_STATE_ABILITY_DETECT       6
1661 #define ANEG_STATE_ACK_DETECT_INIT      7
1662 #define ANEG_STATE_ACK_DETECT           8
1663 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1664 #define ANEG_STATE_COMPLETE_ACK         10
1665 #define ANEG_STATE_IDLE_DETECT_INIT     11
1666 #define ANEG_STATE_IDLE_DETECT          12
1667 #define ANEG_STATE_LINK_OK              13
1668 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1669 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1670
1671         u32 flags;
1672 #define MR_AN_ENABLE            0x00000001
1673 #define MR_RESTART_AN           0x00000002
1674 #define MR_AN_COMPLETE          0x00000004
1675 #define MR_PAGE_RX              0x00000008
1676 #define MR_NP_LOADED            0x00000010
1677 #define MR_TOGGLE_TX            0x00000020
1678 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1679 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1680 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1681 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1682 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1683 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1684 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1685 #define MR_TOGGLE_RX            0x00002000
1686 #define MR_NP_RX                0x00004000
1687
1688 #define MR_LINK_OK              0x80000000
1689
1690         unsigned long link_time, cur_time;
1691
1692         u32 ability_match_cfg;
1693         int ability_match_count;
1694
1695         char ability_match, idle_match, ack_match;
1696
1697         u32 txconfig, rxconfig;
1698 #define ANEG_CFG_NP             0x00000080
1699 #define ANEG_CFG_ACK            0x00000040
1700 #define ANEG_CFG_RF2            0x00000020
1701 #define ANEG_CFG_RF1            0x00000010
1702 #define ANEG_CFG_PS2            0x00000001
1703 #define ANEG_CFG_PS1            0x00008000
1704 #define ANEG_CFG_HD             0x00004000
1705 #define ANEG_CFG_FD             0x00002000
1706 #define ANEG_CFG_INVAL          0x00001f06
1707
1708 };
1709 #define ANEG_OK         0
1710 #define ANEG_DONE       1
1711 #define ANEG_TIMER_ENAB 2
1712 #define ANEG_FAILED     -1
1713
1714 #define ANEG_STATE_SETTLE_TIME  10000
1715
1716 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1717                                    struct tg3_fiber_aneginfo *ap)
1718 {
1719         unsigned long delta;
1720         u32 rx_cfg_reg;
1721         int ret;
1722
1723         if (ap->state == ANEG_STATE_UNKNOWN) {
1724                 ap->rxconfig = 0;
1725                 ap->link_time = 0;
1726                 ap->cur_time = 0;
1727                 ap->ability_match_cfg = 0;
1728                 ap->ability_match_count = 0;
1729                 ap->ability_match = 0;
1730                 ap->idle_match = 0;
1731                 ap->ack_match = 0;
1732         }
1733         ap->cur_time++;
1734
1735         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1736                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1737
1738                 if (rx_cfg_reg != ap->ability_match_cfg) {
1739                         ap->ability_match_cfg = rx_cfg_reg;
1740                         ap->ability_match = 0;
1741                         ap->ability_match_count = 0;
1742                 } else {
1743                         if (++ap->ability_match_count > 1) {
1744                                 ap->ability_match = 1;
1745                                 ap->ability_match_cfg = rx_cfg_reg;
1746                         }
1747                 }
1748                 if (rx_cfg_reg & ANEG_CFG_ACK)
1749                         ap->ack_match = 1;
1750                 else
1751                         ap->ack_match = 0;
1752
1753                 ap->idle_match = 0;
1754         } else {
1755                 ap->idle_match = 1;
1756                 ap->ability_match_cfg = 0;
1757                 ap->ability_match_count = 0;
1758                 ap->ability_match = 0;
1759                 ap->ack_match = 0;
1760
1761                 rx_cfg_reg = 0;
1762         }
1763
1764         ap->rxconfig = rx_cfg_reg;
1765         ret = ANEG_OK;
1766
1767         switch(ap->state) {
1768         case ANEG_STATE_UNKNOWN:
1769                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1770                         ap->state = ANEG_STATE_AN_ENABLE;
1771
1772                 /* fallthru */
1773         case ANEG_STATE_AN_ENABLE:
1774                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1775                 if (ap->flags & MR_AN_ENABLE) {
1776                         ap->link_time = 0;
1777                         ap->cur_time = 0;
1778                         ap->ability_match_cfg = 0;
1779                         ap->ability_match_count = 0;
1780                         ap->ability_match = 0;
1781                         ap->idle_match = 0;
1782                         ap->ack_match = 0;
1783
1784                         ap->state = ANEG_STATE_RESTART_INIT;
1785                 } else {
1786                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1787                 }
1788                 break;
1789
1790         case ANEG_STATE_RESTART_INIT:
1791                 ap->link_time = ap->cur_time;
1792                 ap->flags &= ~(MR_NP_LOADED);
1793                 ap->txconfig = 0;
1794                 tw32(MAC_TX_AUTO_NEG, 0);
1795                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1796                 tw32_f(MAC_MODE, tp->mac_mode);
1797                 udelay(40);
1798
1799                 ret = ANEG_TIMER_ENAB;
1800                 ap->state = ANEG_STATE_RESTART;
1801
1802                 /* fallthru */
1803         case ANEG_STATE_RESTART:
1804                 delta = ap->cur_time - ap->link_time;
1805                 if (delta > ANEG_STATE_SETTLE_TIME) {
1806                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1807                 } else {
1808                         ret = ANEG_TIMER_ENAB;
1809                 }
1810                 break;
1811
1812         case ANEG_STATE_DISABLE_LINK_OK:
1813                 ret = ANEG_DONE;
1814                 break;
1815
1816         case ANEG_STATE_ABILITY_DETECT_INIT:
1817                 ap->flags &= ~(MR_TOGGLE_TX);
1818                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1819                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1820                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1821                 tw32_f(MAC_MODE, tp->mac_mode);
1822                 udelay(40);
1823
1824                 ap->state = ANEG_STATE_ABILITY_DETECT;
1825                 break;
1826
1827         case ANEG_STATE_ABILITY_DETECT:
1828                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1829                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1830                 }
1831                 break;
1832
1833         case ANEG_STATE_ACK_DETECT_INIT:
1834                 ap->txconfig |= ANEG_CFG_ACK;
1835                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1836                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1837                 tw32_f(MAC_MODE, tp->mac_mode);
1838                 udelay(40);
1839
1840                 ap->state = ANEG_STATE_ACK_DETECT;
1841
1842                 /* fallthru */
1843         case ANEG_STATE_ACK_DETECT:
1844                 if (ap->ack_match != 0) {
1845                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1846                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1847                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1848                         } else {
1849                                 ap->state = ANEG_STATE_AN_ENABLE;
1850                         }
1851                 } else if (ap->ability_match != 0 &&
1852                            ap->rxconfig == 0) {
1853                         ap->state = ANEG_STATE_AN_ENABLE;
1854                 }
1855                 break;
1856
1857         case ANEG_STATE_COMPLETE_ACK_INIT:
1858                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1859                         ret = ANEG_FAILED;
1860                         break;
1861                 }
1862                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1863                                MR_LP_ADV_HALF_DUPLEX |
1864                                MR_LP_ADV_SYM_PAUSE |
1865                                MR_LP_ADV_ASYM_PAUSE |
1866                                MR_LP_ADV_REMOTE_FAULT1 |
1867                                MR_LP_ADV_REMOTE_FAULT2 |
1868                                MR_LP_ADV_NEXT_PAGE |
1869                                MR_TOGGLE_RX |
1870                                MR_NP_RX);
1871                 if (ap->rxconfig & ANEG_CFG_FD)
1872                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1873                 if (ap->rxconfig & ANEG_CFG_HD)
1874                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1875                 if (ap->rxconfig & ANEG_CFG_PS1)
1876                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1877                 if (ap->rxconfig & ANEG_CFG_PS2)
1878                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1879                 if (ap->rxconfig & ANEG_CFG_RF1)
1880                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1881                 if (ap->rxconfig & ANEG_CFG_RF2)
1882                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1883                 if (ap->rxconfig & ANEG_CFG_NP)
1884                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1885
1886                 ap->link_time = ap->cur_time;
1887
1888                 ap->flags ^= (MR_TOGGLE_TX);
1889                 if (ap->rxconfig & 0x0008)
1890                         ap->flags |= MR_TOGGLE_RX;
1891                 if (ap->rxconfig & ANEG_CFG_NP)
1892                         ap->flags |= MR_NP_RX;
1893                 ap->flags |= MR_PAGE_RX;
1894
1895                 ap->state = ANEG_STATE_COMPLETE_ACK;
1896                 ret = ANEG_TIMER_ENAB;
1897                 break;
1898
1899         case ANEG_STATE_COMPLETE_ACK:
1900                 if (ap->ability_match != 0 &&
1901                     ap->rxconfig == 0) {
1902                         ap->state = ANEG_STATE_AN_ENABLE;
1903                         break;
1904                 }
1905                 delta = ap->cur_time - ap->link_time;
1906                 if (delta > ANEG_STATE_SETTLE_TIME) {
1907                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1908                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1909                         } else {
1910                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1911                                     !(ap->flags & MR_NP_RX)) {
1912                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1913                                 } else {
1914                                         ret = ANEG_FAILED;
1915                                 }
1916                         }
1917                 }
1918                 break;
1919
1920         case ANEG_STATE_IDLE_DETECT_INIT:
1921                 ap->link_time = ap->cur_time;
1922                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1923                 tw32_f(MAC_MODE, tp->mac_mode);
1924                 udelay(40);
1925
1926                 ap->state = ANEG_STATE_IDLE_DETECT;
1927                 ret = ANEG_TIMER_ENAB;
1928                 break;
1929
1930         case ANEG_STATE_IDLE_DETECT:
1931                 if (ap->ability_match != 0 &&
1932                     ap->rxconfig == 0) {
1933                         ap->state = ANEG_STATE_AN_ENABLE;
1934                         break;
1935                 }
1936                 delta = ap->cur_time - ap->link_time;
1937                 if (delta > ANEG_STATE_SETTLE_TIME) {
1938                         /* XXX another gem from the Broadcom driver :( */
1939                         ap->state = ANEG_STATE_LINK_OK;
1940                 }
1941                 break;
1942
1943         case ANEG_STATE_LINK_OK:
1944                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1945                 ret = ANEG_DONE;
1946                 break;
1947
1948         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1949                 /* ??? unimplemented */
1950                 break;
1951
1952         case ANEG_STATE_NEXT_PAGE_WAIT:
1953                 /* ??? unimplemented */
1954                 break;
1955
1956         default:
1957                 ret = ANEG_FAILED;
1958                 break;
1959         }
1960
1961         return ret;
1962 }
1963
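/* A minimal sketch of how the state machine above is intended to be driven:
 * tick it roughly once per microsecond and stop on ANEG_DONE or ANEG_FAILED.
 * The 195000-tick bound and the 1us delay mirror the loop in
 * tg3_setup_fiber_phy() below; the fragment is illustrative and not compiled.
 */
#if 0
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick = 0;

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;

        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;
                udelay(1);
        }
#endif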
1964 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
1965 {
1966         u32 orig_pause_cfg;
1967         u16 orig_active_speed;
1968         u8 orig_active_duplex;
1969         int current_link_up;
1970         int i;
1971
1972         orig_pause_cfg =
1973                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1974                                   TG3_FLAG_TX_PAUSE));
1975         orig_active_speed = tp->link_config.active_speed;
1976         orig_active_duplex = tp->link_config.active_duplex;
1977
1978         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
1979         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1980         tw32_f(MAC_MODE, tp->mac_mode);
1981         udelay(40);
1982
1983         /* Reset when initializing for the first time, or when we have a link. */
1984         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
1985             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1986                 /* Set PLL lock range. */
1987                 tg3_writephy(tp, 0x16, 0x8007);
1988
1989                 /* SW reset */
1990                 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
1991
1992                 /* Wait for reset to complete. */
1993                 /* XXX schedule_timeout() ... */
1994                 for (i = 0; i < 500; i++)
1995                         udelay(10);
1996
1997                 /* Config mode; select PMA/Ch 1 regs. */
1998                 tg3_writephy(tp, 0x10, 0x8411);
1999
2000                 /* Enable auto-lock and comdet, select txclk for tx. */
2001                 tg3_writephy(tp, 0x11, 0x0a10);
2002
2003                 tg3_writephy(tp, 0x18, 0x00a0);
2004                 tg3_writephy(tp, 0x16, 0x41ff);
2005
2006                 /* Assert and deassert POR. */
2007                 tg3_writephy(tp, 0x13, 0x0400);
2008                 udelay(40);
2009                 tg3_writephy(tp, 0x13, 0x0000);
2010
2011                 tg3_writephy(tp, 0x11, 0x0a50);
2012                 udelay(40);
2013                 tg3_writephy(tp, 0x11, 0x0a10);
2014
2015                 /* Wait for signal to stabilize */
2016                 /* XXX schedule_timeout() ... */
2017                 for (i = 0; i < 15000; i++)
2018                         udelay(10);
2019
2020                 /* Deselect the channel register so we can read the PHYID
2021                  * later.
2022                  */
2023                 tg3_writephy(tp, 0x10, 0x8011);
2024         }
2025
2026         /* Enable link change interrupt unless serdes polling.  */
2027         if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
2028                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2029         else
2030                 tw32_f(MAC_EVENT, 0);
2031         udelay(40);
2032
2033         current_link_up = 0;
2034         if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
2035                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2036                     !(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
2037                         struct tg3_fiber_aneginfo aninfo;
2038                         int status = ANEG_FAILED;
2039                         unsigned int tick;
2040                         u32 tmp;
2041
2042                         memset(&aninfo, 0, sizeof(aninfo));
2043                         aninfo.flags |= (MR_AN_ENABLE);
2044
2045                         tw32(MAC_TX_AUTO_NEG, 0);
2046
2047                         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2048                         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2049                         udelay(40);
2050
2051                         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2052                         udelay(40);
2053
2054                         aninfo.state = ANEG_STATE_UNKNOWN;
2055                         aninfo.cur_time = 0;
2056                         tick = 0;
2057                         while (++tick < 195000) {
2058                                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2059                                 if (status == ANEG_DONE ||
2060                                     status == ANEG_FAILED)
2061                                         break;
2062
2063                                 udelay(1);
2064                         }
2065
2066                         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2067                         tw32_f(MAC_MODE, tp->mac_mode);
2068                         udelay(40);
2069
2070                         if (status == ANEG_DONE &&
2071                             (aninfo.flags &
2072                              (MR_AN_COMPLETE | MR_LINK_OK |
2073                               MR_LP_ADV_FULL_DUPLEX))) {
2074                                 u32 local_adv, remote_adv;
2075
2076                                 local_adv = ADVERTISE_PAUSE_CAP;
2077                                 remote_adv = 0;
2078                                 if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
2079                                         remote_adv |= LPA_PAUSE_CAP;
2080                                 if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
2081                                         remote_adv |= LPA_PAUSE_ASYM;
2082
2083                                 tg3_setup_flow_control(tp, local_adv, remote_adv);
2084
2085                                 tp->tg3_flags |=
2086                                         TG3_FLAG_GOT_SERDES_FLOWCTL;
2087                                 current_link_up = 1;
2088                         }
2089                         for (i = 0; i < 60; i++) {
2090                                 udelay(20);
2091                                 tw32_f(MAC_STATUS,
2092                                      (MAC_STATUS_SYNC_CHANGED |
2093                                       MAC_STATUS_CFG_CHANGED));
2094                                 udelay(40);
2095                                 if ((tr32(MAC_STATUS) &
2096                                      (MAC_STATUS_SYNC_CHANGED |
2097                                       MAC_STATUS_CFG_CHANGED)) == 0)
2098                                         break;
2099                         }
2100                         if (current_link_up == 0 &&
2101                             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2102                                 current_link_up = 1;
2103                         }
2104                 } else {
2105                         /* Forcing 1000FD link up. */
2106                         current_link_up = 1;
2107                 }
2108         }
2109
2110         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2111         tw32_f(MAC_MODE, tp->mac_mode);
2112         udelay(40);
2113
2114         tp->hw_status->status =
2115                 (SD_STATUS_UPDATED |
2116                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2117
2118         for (i = 0; i < 100; i++) {
2119                 udelay(20);
2120                 tw32_f(MAC_STATUS,
2121                      (MAC_STATUS_SYNC_CHANGED |
2122                       MAC_STATUS_CFG_CHANGED));
2123                 udelay(40);
2124                 if ((tr32(MAC_STATUS) &
2125                      (MAC_STATUS_SYNC_CHANGED |
2126                       MAC_STATUS_CFG_CHANGED)) == 0)
2127                         break;
2128         }
2129
2130         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
2131                 current_link_up = 0;
2132
2133         if (current_link_up == 1) {
2134                 tp->link_config.active_speed = SPEED_1000;
2135                 tp->link_config.active_duplex = DUPLEX_FULL;
2136                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2137                                     LED_CTRL_LNKLED_OVERRIDE |
2138                                     LED_CTRL_1000MBPS_ON));
2139         } else {
2140                 tp->link_config.active_speed = SPEED_INVALID;
2141                 tp->link_config.active_duplex = DUPLEX_INVALID;
2142                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2143                                     LED_CTRL_LNKLED_OVERRIDE |
2144                                     LED_CTRL_TRAFFIC_OVERRIDE));
2145         }
2146
2147         if (current_link_up != netif_carrier_ok(tp->dev)) {
2148                 if (current_link_up)
2149                         netif_carrier_on(tp->dev);
2150                 else
2151                         netif_carrier_off(tp->dev);
2152                 tg3_link_report(tp);
2153         } else {
2154                 u32 now_pause_cfg =
2155                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2156                                          TG3_FLAG_TX_PAUSE);
2157                 if (orig_pause_cfg != now_pause_cfg ||
2158                     orig_active_speed != tp->link_config.active_speed ||
2159                     orig_active_duplex != tp->link_config.active_duplex)
2160                         tg3_link_report(tp);
2161         }
2162
2163         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
2164                 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
2165                 udelay(40);
2166                 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
2167                         tw32_f(MAC_MODE, tp->mac_mode);
2168                         udelay(40);
2169                 }
2170         }
2171
2172         return 0;
2173 }
2174
2175 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2176 {
2177         int err;
2178
2179         if (tp->phy_id == PHY_ID_SERDES) {
2180                 err = tg3_setup_fiber_phy(tp, force_reset);
2181         } else {
2182                 err = tg3_setup_copper_phy(tp, force_reset);
2183         }
2184
2185         if (tp->link_config.active_speed == SPEED_1000 &&
2186             tp->link_config.active_duplex == DUPLEX_HALF)
2187                 tw32(MAC_TX_LENGTHS,
2188                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2189                       (6 << TX_LENGTHS_IPG_SHIFT) |
2190                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2191         else
2192                 tw32(MAC_TX_LENGTHS,
2193                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2194                       (6 << TX_LENGTHS_IPG_SHIFT) |
2195                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2196
2197         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2198             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2199                 if (netif_carrier_ok(tp->dev)) {
2200                         tw32(HOSTCC_STAT_COAL_TICKS,
2201                              DEFAULT_STAT_COAL_TICKS);
2202                 } else {
2203                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2204                 }
2205         }
2206
2207         return err;
2208 }
2209
2210 /* Tigon3 never reports partial packet sends.  So we do not
2211  * need special logic to handle SKBs that have not had all
2212  * of their frags sent yet, like SunGEM does.
2213  */
2214 static void tg3_tx(struct tg3 *tp)
2215 {
2216         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2217         u32 sw_idx = tp->tx_cons;
2218
2219         while (sw_idx != hw_idx) {
2220                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2221                 struct sk_buff *skb = ri->skb;
2222                 int i;
2223
2224                 if (unlikely(skb == NULL))
2225                         BUG();
2226
2227                 pci_unmap_single(tp->pdev,
2228                                  pci_unmap_addr(ri, mapping),
2229                                  skb_headlen(skb),
2230                                  PCI_DMA_TODEVICE);
2231
2232                 ri->skb = NULL;
2233
2234                 sw_idx = NEXT_TX(sw_idx);
2235
2236                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2237                         if (unlikely(sw_idx == hw_idx))
2238                                 BUG();
2239
2240                         ri = &tp->tx_buffers[sw_idx];
2241                         if (unlikely(ri->skb != NULL))
2242                                 BUG();
2243
2244                         pci_unmap_page(tp->pdev,
2245                                        pci_unmap_addr(ri, mapping),
2246                                        skb_shinfo(skb)->frags[i].size,
2247                                        PCI_DMA_TODEVICE);
2248
2249                         sw_idx = NEXT_TX(sw_idx);
2250                 }
2251
2252                 dev_kfree_skb_irq(skb);
2253         }
2254
2255         tp->tx_cons = sw_idx;
2256
2257         if (netif_queue_stopped(tp->dev) &&
2258             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2259                 netif_wake_queue(tp->dev);
2260 }
2261
2262 /* Returns size of skb allocated or < 0 on error.
2263  *
2264  * We only need to fill in the address because the other members
2265  * of the RX descriptor are invariant, see tg3_init_rings.
2266  *
2267  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2268  * posting buffers we only dirty the first cache line of the RX
2269  * descriptor (containing the address).  Whereas for the RX status
2270  * buffers the cpu only reads the last cacheline of the RX descriptor
2271  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2272  */
2273 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2274                             int src_idx, u32 dest_idx_unmasked)
2275 {
2276         struct tg3_rx_buffer_desc *desc;
2277         struct ring_info *map, *src_map;
2278         struct sk_buff *skb;
2279         dma_addr_t mapping;
2280         int skb_size, dest_idx;
2281
2282         src_map = NULL;
2283         switch (opaque_key) {
2284         case RXD_OPAQUE_RING_STD:
2285                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2286                 desc = &tp->rx_std[dest_idx];
2287                 map = &tp->rx_std_buffers[dest_idx];
2288                 if (src_idx >= 0)
2289                         src_map = &tp->rx_std_buffers[src_idx];
2290                 skb_size = RX_PKT_BUF_SZ;
2291                 break;
2292
2293         case RXD_OPAQUE_RING_JUMBO:
2294                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2295                 desc = &tp->rx_jumbo[dest_idx];
2296                 map = &tp->rx_jumbo_buffers[dest_idx];
2297                 if (src_idx >= 0)
2298                         src_map = &tp->rx_jumbo_buffers[src_idx];
2299                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2300                 break;
2301
2302         default:
2303                 return -EINVAL;
2304         }
2305
2306         /* Do not overwrite any of the map or rp information
2307          * until we are sure we can commit to a new buffer.
2308          *
2309          * Callers depend upon this behavior and assume that
2310          * we leave everything unchanged if we fail.
2311          */
2312         skb = dev_alloc_skb(skb_size);
2313         if (skb == NULL)
2314                 return -ENOMEM;
2315
2316         skb->dev = tp->dev;
2317         skb_reserve(skb, tp->rx_offset);
2318
2319         mapping = pci_map_single(tp->pdev, skb->data,
2320                                  skb_size - tp->rx_offset,
2321                                  PCI_DMA_FROMDEVICE);
2322
2323         map->skb = skb;
2324         pci_unmap_addr_set(map, mapping, mapping);
2325
2326         if (src_map != NULL)
2327                 src_map->skb = NULL;
2328
2329         desc->addr_hi = ((u64)mapping >> 32);
2330         desc->addr_lo = ((u64)mapping & 0xffffffff);
2331
2332         return skb_size;
2333 }
2334
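/* A minimal sketch of the caller contract spelled out above: if the
 * allocation fails, the ring entry and its DMA mapping are left untouched,
 * so a caller can simply recycle the existing buffer and drop the frame.
 * This mirrors what tg3_rx() does further down; the fragment is illustrative
 * only and assumes tg3_rx()'s local variables.
 */
#if 0
        skb_size = tg3_alloc_rx_skb(tp, opaque_key, desc_idx, *post_ptr);
        if (skb_size < 0) {
                /* Old skb and mapping are still valid; requeue them. */
                tg3_recycle_rx(tp, opaque_key, desc_idx, *post_ptr);
                tp->net_stats.rx_dropped++;
        }
#endif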
2335 /* We only need to copy over the address because the other
2336  * members of the RX descriptor are invariant.  See notes above
2337  * tg3_alloc_rx_skb for full details.
2338  */
2339 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2340                            int src_idx, u32 dest_idx_unmasked)
2341 {
2342         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2343         struct ring_info *src_map, *dest_map;
2344         int dest_idx;
2345
2346         switch (opaque_key) {
2347         case RXD_OPAQUE_RING_STD:
2348                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2349                 dest_desc = &tp->rx_std[dest_idx];
2350                 dest_map = &tp->rx_std_buffers[dest_idx];
2351                 src_desc = &tp->rx_std[src_idx];
2352                 src_map = &tp->rx_std_buffers[src_idx];
2353                 break;
2354
2355         case RXD_OPAQUE_RING_JUMBO:
2356                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2357                 dest_desc = &tp->rx_jumbo[dest_idx];
2358                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2359                 src_desc = &tp->rx_jumbo[src_idx];
2360                 src_map = &tp->rx_jumbo_buffers[src_idx];
2361                 break;
2362
2363         default:
2364                 return;
2365         }
2366
2367         dest_map->skb = src_map->skb;
2368         pci_unmap_addr_set(dest_map, mapping,
2369                            pci_unmap_addr(src_map, mapping));
2370         dest_desc->addr_hi = src_desc->addr_hi;
2371         dest_desc->addr_lo = src_desc->addr_lo;
2372
2373         src_map->skb = NULL;
2374 }
2375
2376 #if TG3_VLAN_TAG_USED
2377 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2378 {
2379         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2380 }
2381 #endif
2382
2383 /* The RX ring scheme is composed of multiple rings which post fresh
2384  * buffers to the chip, and one special ring the chip uses to report
2385  * status back to the host.
2386  *
2387  * The special ring reports the status of received packets to the
2388  * host.  The chip does not write into the original descriptor the
2389  * RX buffer was obtained from.  The chip simply takes the original
2390  * descriptor as provided by the host, updates the status and length
2391  * field, then writes this into the next status ring entry.
2392  *
2393  * Each ring the host uses to post buffers to the chip is described
2394  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2395  * it is first placed into on-chip RAM.  When the packet's length
2396  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
2397  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2398  * whose MAXLEN covers the new packet's length is chosen.
2399  *
2400  * The "separate ring for rx status" scheme may sound queer, but it makes
2401  * sense from a cache coherency perspective.  If only the host writes
2402  * to the buffer post rings, and only the chip writes to the rx status
2403  * rings, then cache lines never move beyond shared-modified state.
2404  * If both the host and chip were to write into the same ring, cache line
2405  * eviction could occur since both entities want it in an exclusive state.
2406  */
2407 static int tg3_rx(struct tg3 *tp, int budget)
2408 {
2409         u32 work_mask;
2410         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2411         u16 hw_idx, sw_idx;
2412         int received;
2413
2414         hw_idx = tp->hw_status->idx[0].rx_producer;
2415         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2416         work_mask = 0;
2417         received = 0;
2418         while (sw_idx != hw_idx && budget > 0) {
2419                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2420                 unsigned int len;
2421                 struct sk_buff *skb;
2422                 dma_addr_t dma_addr;
2423                 u32 opaque_key, desc_idx, *post_ptr;
2424
2425                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2426                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2427                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2428                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2429                                                   mapping);
2430                         skb = tp->rx_std_buffers[desc_idx].skb;
2431                         post_ptr = &tp->rx_std_ptr;
2432                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2433                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2434                                                   mapping);
2435                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2436                         post_ptr = &tp->rx_jumbo_ptr;
2437                 } else {
2438                         goto next_pkt_nopost;
2439                 }
2440
2441
2442                 work_mask |= opaque_key;
2443
2444                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2445                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2446                 drop_it:
2447                         tg3_recycle_rx(tp, opaque_key,
2448                                        desc_idx, *post_ptr);
2449                 drop_it_no_recycle:
2450                         /* Other statistics are kept track of by the card. */
2451                         tp->net_stats.rx_dropped++;
2452                         goto next_pkt;
2453                 }
2454
2455                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2456
2457                 if (len > RX_COPY_THRESHOLD) {
2458                         int skb_size;
2459
2460                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2461                                                     desc_idx, *post_ptr);
2462                         if (skb_size < 0)
2463                                 goto drop_it;
2464
2465                         pci_unmap_single(tp->pdev, dma_addr,
2466                                          skb_size - tp->rx_offset,
2467                                          PCI_DMA_FROMDEVICE);
2468
2469                         skb_put(skb, len);
2470                 } else {
2471                         struct sk_buff *copy_skb;
2472
2473                         tg3_recycle_rx(tp, opaque_key,
2474                                        desc_idx, *post_ptr);
2475
2476                         copy_skb = dev_alloc_skb(len + 2);
2477                         if (copy_skb == NULL)
2478                                 goto drop_it_no_recycle;
2479
2480                         copy_skb->dev = tp->dev;
2481                         skb_reserve(copy_skb, 2);
2482                         skb_put(copy_skb, len);
2483                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2484                         memcpy(copy_skb->data, skb->data, len);
2485                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2486
2487                         /* We'll reuse the original ring buffer. */
2488                         skb = copy_skb;
2489                 }
2490
2491                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2492                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2493                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2494                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2495                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2496                 else
2497                         skb->ip_summed = CHECKSUM_NONE;
2498
2499                 skb->protocol = eth_type_trans(skb, tp->dev);
2500 #if TG3_VLAN_TAG_USED
2501                 if (tp->vlgrp != NULL &&
2502                     desc->type_flags & RXD_FLAG_VLAN) {
2503                         tg3_vlan_rx(tp, skb,
2504                                     desc->err_vlan & RXD_VLAN_MASK);
2505                 } else
2506 #endif
2507                         netif_receive_skb(skb);
2508
2509                 tp->dev->last_rx = jiffies;
2510                 received++;
2511                 budget--;
2512
2513 next_pkt:
2514                 (*post_ptr)++;
2515 next_pkt_nopost:
2516                 rx_rcb_ptr++;
2517                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2518         }
2519
2520         /* ACK the status ring. */
2521         tp->rx_rcb_ptr = rx_rcb_ptr;
2522         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2523                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2524
2525         /* Refill RX ring(s). */
2526         if (work_mask & RXD_OPAQUE_RING_STD) {
2527                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2528                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2529                              sw_idx);
2530         }
2531         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2532                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2533                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2534                              sw_idx);
2535         }
2536
2537         return received;
2538 }
2539
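/* Condensed view of the mailbox handshake tg3_rx() performs above, assuming
 * only the standard ring saw traffic.  The chip advances the return-ring
 * producer index in the status block; the host acknowledges how far it has
 * consumed and re-posts fresh buffers by bumping the standard-ring producer.
 * The index value 37 is illustrative, not taken from a real run.
 */
#if 0
        /* chip:  hw_status->idx[0].rx_producer == 37  (frames ready up to 37) */
        /* host:  process entries, advancing rx_rcb_ptr to 37, then:           */
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
                     37 % TG3_RX_RCB_RING_SIZE(tp));        /* ACK status ring */
        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                     tp->rx_std_ptr % TG3_RX_RING_SIZE);    /* refill std ring */
#endif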
2540 static int tg3_poll(struct net_device *netdev, int *budget)
2541 {
2542         struct tg3 *tp = netdev_priv(netdev);
2543         struct tg3_hw_status *sblk = tp->hw_status;
2544         unsigned long flags;
2545         int done;
2546
2547         spin_lock_irqsave(&tp->lock, flags);
2548
2549         /* handle link change and other phy events */
2550         if (!(tp->tg3_flags &
2551               (TG3_FLAG_USE_LINKCHG_REG |
2552                TG3_FLAG_POLL_SERDES))) {
2553                 if (sblk->status & SD_STATUS_LINK_CHG) {
2554                         sblk->status = SD_STATUS_UPDATED |
2555                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2556                         tg3_setup_phy(tp, 0);
2557                 }
2558         }
2559
2560         /* run TX completion thread */
2561         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2562                 spin_lock(&tp->tx_lock);
2563                 tg3_tx(tp);
2564                 spin_unlock(&tp->tx_lock);
2565         }
2566
2567         spin_unlock_irqrestore(&tp->lock, flags);
2568
2569         /* run RX thread, within the bounds set by NAPI.
2570          * All RX "locking" is done by ensuring outside
2571          * code synchronizes with dev->poll()
2572          */
2573         done = 1;
2574         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2575                 int orig_budget = *budget;
2576                 int work_done;
2577
2578                 if (orig_budget > netdev->quota)
2579                         orig_budget = netdev->quota;
2580
2581                 work_done = tg3_rx(tp, orig_budget);
2582
2583                 *budget -= work_done;
2584                 netdev->quota -= work_done;
2585
2586                 if (work_done >= orig_budget)
2587                         done = 0;
2588         }
2589
2590         /* if no more work, tell net stack and NIC we're done */
2591         if (done) {
2592                 spin_lock_irqsave(&tp->lock, flags);
2593                 __netif_rx_complete(netdev);
2594                 tg3_enable_ints(tp);
2595                 spin_unlock_irqrestore(&tp->lock, flags);
2596         }
2597
2598         return (done ? 0 : 1);
2599 }
2600
2601 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2602 {
2603         struct tg3_hw_status *sblk = tp->hw_status;
2604         unsigned int work_exists = 0;
2605
2606         /* check for phy events */
2607         if (!(tp->tg3_flags &
2608               (TG3_FLAG_USE_LINKCHG_REG |
2609                TG3_FLAG_POLL_SERDES))) {
2610                 if (sblk->status & SD_STATUS_LINK_CHG)
2611                         work_exists = 1;
2612         }
2613         /* check for RX/TX work to do */
2614         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2615             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2616                 work_exists = 1;
2617
2618         return work_exists;
2619 }
2620
2621 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2622 {
2623         struct net_device *dev = dev_id;
2624         struct tg3 *tp = netdev_priv(dev);
2625         struct tg3_hw_status *sblk = tp->hw_status;
2626         unsigned long flags;
2627         unsigned int handled = 1;
2628
2629         spin_lock_irqsave(&tp->lock, flags);
2630
2631         if (sblk->status & SD_STATUS_UPDATED) {
2632                 /*
2633                  * writing any value to intr-mbox-0 clears PCI INTA# and
2634                  * chip-internal interrupt pending events.
2635                  * writing non-zero to intr-mbox-0 additionally tells the
2636                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2637                  * event coalescing.
2638                  */
2639                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2640                              0x00000001);
2641                 /*
2642                  * Flush PCI write.  This also guarantees that our
2643                  * status block has been flushed to host memory.
2644                  */
2645                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2646                 sblk->status &= ~SD_STATUS_UPDATED;
2647
2648                 if (likely(tg3_has_work(dev, tp)))
2649                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2650                 else {
2651                         /* no work, shared interrupt perhaps?  re-enable
2652                          * interrupts, and flush that PCI write
2653                          */
2654                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2655                                 0x00000000);
2656                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2657                 }
2658         } else {        /* shared interrupt */
2659                 handled = 0;
2660         }
2661
2662         spin_unlock_irqrestore(&tp->lock, flags);
2663
2664         return IRQ_RETVAL(handled);
2665 }
2666
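/* tg3_interrupt() above and tg3_poll() together follow the usual NAPI
 * sequence on this chip: ack and mask in hard-irq context, do the work in
 * dev->poll(), then unmask.  A compressed sketch of that ordering with the
 * mailbox writes spelled out (illustrative, not compiled):
 */
#if 0
        /* hard irq: ack INTA# and mask further chip interrupts */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); /* flush posted write */
        netif_rx_schedule(dev);

        /* later, in dev->poll(), once tg3_rx()/tg3_tx() run out of work */
        __netif_rx_complete(dev);
        tg3_enable_ints(tp);    /* re-enables via intr-mbox-0, as in the
                                 * 0x00000000 write above */
#endif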
2667 static int tg3_init_hw(struct tg3 *);
2668 static int tg3_halt(struct tg3 *);
2669
2670 #ifdef CONFIG_NET_POLL_CONTROLLER
2671 static void tg3_poll_controller(struct net_device *dev)
2672 {
2673         tg3_interrupt(dev->irq, dev, NULL);
2674 }
2675 #endif
2676
2677 static void tg3_reset_task(void *_data)
2678 {
2679         struct tg3 *tp = _data;
2680         unsigned int restart_timer;
2681
2682         tg3_netif_stop(tp);
2683
2684         spin_lock_irq(&tp->lock);
2685         spin_lock(&tp->tx_lock);
2686
2687         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2688         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2689
2690         tg3_halt(tp);
2691         tg3_init_hw(tp);
2692
2693         spin_unlock(&tp->tx_lock);
2694         spin_unlock_irq(&tp->lock);
2695
2696         tg3_netif_start(tp);
2697
2698         if (restart_timer)
2699                 mod_timer(&tp->timer, jiffies + 1);
2700 }
2701
2702 static void tg3_tx_timeout(struct net_device *dev)
2703 {
2704         struct tg3 *tp = netdev_priv(dev);
2705
2706         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2707                dev->name);
2708
2709         schedule_work(&tp->reset_task);
2710 }
2711
2712 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2713
2714 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2715                                        u32 guilty_entry, int guilty_len,
2716                                        u32 last_plus_one, u32 *start, u32 mss)
2717 {
2718         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2719         dma_addr_t new_addr;
2720         u32 entry = *start;
2721         int i;
2722
2723         if (!new_skb) {
2724                 dev_kfree_skb(skb);
2725                 return -1;
2726         }
2727
2728         /* New SKB is guaranteed to be linear. */
2729         entry = *start;
2730         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2731                                   PCI_DMA_TODEVICE);
2732         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2733                     (skb->ip_summed == CHECKSUM_HW) ?
2734                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2735         *start = NEXT_TX(entry);
2736
2737         /* Now clean up the sw ring entries. */
2738         i = 0;
2739         while (entry != last_plus_one) {
2740                 int len;
2741
2742                 if (i == 0)
2743                         len = skb_headlen(skb);
2744                 else
2745                         len = skb_shinfo(skb)->frags[i-1].size;
2746                 pci_unmap_single(tp->pdev,
2747                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2748                                  len, PCI_DMA_TODEVICE);
2749                 if (i == 0) {
2750                         tp->tx_buffers[entry].skb = new_skb;
2751                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2752                 } else {
2753                         tp->tx_buffers[entry].skb = NULL;
2754                 }
2755                 entry = NEXT_TX(entry);
2756         }
2757
2758         dev_kfree_skb(skb);
2759
2760         return 0;
2761 }
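/* Summary of the workaround above: when a mapping would make the chip DMA
 * across a 4GB boundary, the whole packet is linearized with skb_copy(),
 * the copy is mapped as one buffer and described by a single new TX
 * descriptor at *start, and the software ring entries that had already been
 * set up for the original head/fragments are unmapped and cleared before
 * the original skb is freed.  On allocation failure the packet is dropped
 * (-1 is returned and the caller gives up silently).
 */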
2762
2763 static void tg3_set_txd(struct tg3 *tp, int entry,
2764                         dma_addr_t mapping, int len, u32 flags,
2765                         u32 mss_and_is_end)
2766 {
2767         int is_end = (mss_and_is_end & 0x1);
2768         u32 mss = (mss_and_is_end >> 1);
2769         u32 vlan_tag = 0;
2770
2771         if (is_end)
2772                 flags |= TXD_FLAG_END;
2773         if (flags & TXD_FLAG_VLAN) {
2774                 vlan_tag = flags >> 16;
2775                 flags &= 0xffff;
2776         }
2777         vlan_tag |= (mss << TXD_MSS_SHIFT);
2778         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2779                 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2780
2781                 txd->addr_hi = ((u64) mapping >> 32);
2782                 txd->addr_lo = ((u64) mapping & 0xffffffff);
2783                 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2784                 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2785         } else {
2786                 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2787                 unsigned long txd;
2788
2789                 txd = (tp->regs +
2790                        NIC_SRAM_WIN_BASE +
2791                        NIC_SRAM_TX_BUFFER_DESC);
2792                 txd += (entry * TXD_SIZE);
2793
2794                 /* Save some PIOs */
2795                 if (sizeof(dma_addr_t) != sizeof(u32))
2796                         writel(((u64) mapping >> 32),
2797                                txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2798
2799                 writel(((u64) mapping & 0xffffffff),
2800                        txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2801                 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2802                 if (txr->prev_vlan_tag != vlan_tag) {
2803                         writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2804                         txr->prev_vlan_tag = vlan_tag;
2805                 }
2806         }
2807 }
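/* Illustration of the mss_and_is_end encoding used above: callers pack the
 * end-of-packet flag into bit 0 and the TSO MSS into the remaining bits,
 * for example (taken from the transmit paths below):
 *
 *     tg3_set_txd(tp, entry, mapping, len, base_flags,
 *                 (i == last) | (mss << 1));
 *
 * tg3_set_txd() then unpacks is_end = val & 1 and mss = val >> 1, and
 * writes either a host-memory descriptor or, for NIC-resident rings, the
 * descriptor words directly into NIC SRAM (caching prev_vlan_tag to save
 * one PIO write when the tag is unchanged).
 */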
2808
2809 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2810 {
2811         u32 base = (u32) mapping & 0xffffffff;
2812
2813         return ((base > 0xffffdcc0) &&
2814                 ((u64) mapping >> 32) == 0 &&
2815                 (base + len + 8 < base));
2816 }
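/* Worked example for the test above (hypothetical addresses): with
 * mapping = 0x00000000fffff000 and len = 0x2000, base is 0xfffff000, which
 * is above 0xffffdcc0, the upper 32 bits of the mapping are zero, and the
 * 32-bit sum base + len + 8 wraps to 0x1008, i.e. becomes smaller than
 * base.  The function therefore returns true and the buffer must be
 * re-queued through tigon3_4gb_hwbug_workaround() so the chip never DMAs
 * across a 4GB boundary.
 */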
2817
2818 static int tg3_start_xmit_4gbug(struct sk_buff *skb, struct net_device *dev)
2819 {
2820         struct tg3 *tp = netdev_priv(dev);
2821         dma_addr_t mapping;
2822         unsigned int i;
2823         u32 len, entry, base_flags, mss;
2824         int would_hit_hwbug;
2825         unsigned long flags;
2826
2827         len = skb_headlen(skb);
2828
2829         /* No BH disabling for tx_lock here.  We are running in BH disabled
2830          * context and TX reclaim runs via tp->poll inside of a software
2831          * interrupt.  Rejoice!
2832          *
2833          * Actually, things are not so simple.  If we are to take a hw
2834          * IRQ here, we can deadlock, consider:
2835          *
2836          *       CPU1           CPU2
2837          *   tg3_start_xmit
2838          *   take tp->tx_lock
2839          *                      tg3_timer
2840          *                      take tp->lock
2841          *   tg3_interrupt
2842          *   spin on tp->lock
2843          *                      spin on tp->tx_lock
2844          *
2845          * So we really do need to disable interrupts when taking
2846          * tx_lock here.
2847          */
2848         spin_lock_irqsave(&tp->tx_lock, flags);
2849
2850         /* This is a hard error, log it. */
2851         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2852                 netif_stop_queue(dev);
2853                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2854                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2855                        dev->name);
2856                 return 1;
2857         }
2858
2859         entry = tp->tx_prod;
2860         base_flags = 0;
2861         if (skb->ip_summed == CHECKSUM_HW)
2862                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2863 #if TG3_TSO_SUPPORT != 0
2864         mss = 0;
2865         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2866             (mss = skb_shinfo(skb)->tso_size) != 0) {
2867                 int tcp_opt_len, ip_tcp_len;
2868
2869                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2870                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2871
2872                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2873                                TXD_FLAG_CPU_POST_DMA);
2874
2875                 skb->nh.iph->check = 0;
2876                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2877                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2878                                                       skb->nh.iph->daddr,
2879                                                       0, IPPROTO_TCP, 0);
2880
2881                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2882                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2883                                 int tsflags;
2884
2885                                 tsflags = ((skb->nh.iph->ihl - 5) +
2886                                            (tcp_opt_len >> 2));
2887                                 mss |= (tsflags << 11);
2888                         }
2889                 } else {
2890                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2891                                 int tsflags;
2892
2893                                 tsflags = ((skb->nh.iph->ihl - 5) +
2894                                            (tcp_opt_len >> 2));
2895                                 base_flags |= tsflags << 12;
2896                         }
2897                 }
2898         }
2899 #else
2900         mss = 0;
2901 #endif
2902 #if TG3_VLAN_TAG_USED
2903         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2904                 base_flags |= (TXD_FLAG_VLAN |
2905                                (vlan_tx_tag_get(skb) << 16));
2906 #endif
2907
2908         /* Queue skb data, a.k.a. the main skb fragment. */
2909         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2910
2911         tp->tx_buffers[entry].skb = skb;
2912         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2913
2914         would_hit_hwbug = 0;
2915
2916         if (tg3_4g_overflow_test(mapping, len))
2917                 would_hit_hwbug = entry + 1;
2918
2919         tg3_set_txd(tp, entry, mapping, len, base_flags,
2920                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2921
2922         entry = NEXT_TX(entry);
2923
2924         /* Now loop through additional data fragments, and queue them. */
2925         if (skb_shinfo(skb)->nr_frags > 0) {
2926                 unsigned int i, last;
2927
2928                 last = skb_shinfo(skb)->nr_frags - 1;
2929                 for (i = 0; i <= last; i++) {
2930                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2931
2932                         len = frag->size;
2933                         mapping = pci_map_page(tp->pdev,
2934                                                frag->page,
2935                                                frag->page_offset,
2936                                                len, PCI_DMA_TODEVICE);
2937
2938                         tp->tx_buffers[entry].skb = NULL;
2939                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2940
2941                         if (tg3_4g_overflow_test(mapping, len)) {
2942                                 /* Only one should match. */
2943                                 if (would_hit_hwbug)
2944                                         BUG();
2945                                 would_hit_hwbug = entry + 1;
2946                         }
2947
2948                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
2949                                 tg3_set_txd(tp, entry, mapping, len,
2950                                             base_flags, (i == last)|(mss << 1));
2951                         else
2952                                 tg3_set_txd(tp, entry, mapping, len,
2953                                             base_flags, (i == last));
2954
2955                         entry = NEXT_TX(entry);
2956                 }
2957         }
2958
2959         if (would_hit_hwbug) {
2960                 u32 last_plus_one = entry;
2961                 u32 start;
2962                 unsigned int len = 0;
2963
2964                 would_hit_hwbug -= 1;
2965                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
2966                 entry &= (TG3_TX_RING_SIZE - 1);
2967                 start = entry;
2968                 i = 0;
2969                 while (entry != last_plus_one) {
2970                         if (i == 0)
2971                                 len = skb_headlen(skb);
2972                         else
2973                                 len = skb_shinfo(skb)->frags[i-1].size;
2974
2975                         if (entry == would_hit_hwbug)
2976                                 break;
2977
2978                         i++;
2979                         entry = NEXT_TX(entry);
2980
2981                 }
2982
2983                 /* If the workaround fails due to memory/mapping
2984                  * failure, silently drop this packet.
2985                  */
2986                 if (tigon3_4gb_hwbug_workaround(tp, skb,
2987                                                 entry, len,
2988                                                 last_plus_one,
2989                                                 &start, mss))
2990                         goto out_unlock;
2991
2992                 entry = start;
2993         }
2994
2995         /* Packets are ready; update the Tx producer index locally and on the card. */
2996         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2997                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2998                               TG3_64BIT_REG_LOW), entry);
2999         } else {
3000                 /* First, make sure tg3 sees last descriptor fully
3001                  * in SRAM.
3002                  */
3003                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3004                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
3005
3006                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3007                               TG3_64BIT_REG_LOW), entry);
3008         }
3009
3010         tp->tx_prod = entry;
3011         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3012                 netif_stop_queue(dev);
3013
3014 out_unlock:
3015         spin_unlock_irqrestore(&tp->tx_lock, flags);
3016
3017         dev->trans_start = jiffies;
3018
3019         return 0;
3020 }
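/* Note: the function above is the transmit path for chips that suffer from
 * the 4GB-boundary DMA bug; tg3_start_xmit() below is structurally the same
 * but omits the tg3_4g_overflow_test()/workaround handling, as its own
 * trailing comment explains.  Which of the two is installed as
 * dev->hard_start_xmit is presumably decided at probe time from the chip
 * revision.
 */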
3021
3022 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3023 {
3024         struct tg3 *tp = netdev_priv(dev);
3025         dma_addr_t mapping;
3026         u32 len, entry, base_flags, mss;
3027         unsigned long flags;
3028
3029         len = skb_headlen(skb);
3030
3031         /* No BH disabling for tx_lock here.  We are running in BH disabled
3032          * context and TX reclaim runs via tp->poll inside of a software
3033          * interrupt.  Rejoice!
3034          *
3035          * Actually, things are not so simple.  If we are to take a hw
3036          * IRQ here, we can deadlock, consider:
3037          *
3038          *       CPU1           CPU2
3039          *   tg3_start_xmit
3040          *   take tp->tx_lock
3041          *                      tg3_timer
3042          *                      take tp->lock
3043          *   tg3_interrupt
3044          *   spin on tp->lock
3045          *                      spin on tp->tx_lock
3046          *
3047          * So we really do need to disable interrupts when taking
3048          * tx_lock here.
3049          */
3050         spin_lock_irqsave(&tp->tx_lock, flags);
3051
3052         /* This is a hard error, log it. */
3053         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3054                 netif_stop_queue(dev);
3055                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3056                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3057                        dev->name);
3058                 return 1;
3059         }
3060
3061         entry = tp->tx_prod;
3062         base_flags = 0;
3063         if (skb->ip_summed == CHECKSUM_HW)
3064                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3065 #if TG3_TSO_SUPPORT != 0
3066         mss = 0;
3067         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3068             (mss = skb_shinfo(skb)->tso_size) != 0) {
3069                 int tcp_opt_len, ip_tcp_len;
3070
3071                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3072                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3073
3074                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3075                                TXD_FLAG_CPU_POST_DMA);
3076
3077                 skb->nh.iph->check = 0;
3078                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3079                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
3080                                                       skb->nh.iph->daddr,
3081                                                       0, IPPROTO_TCP, 0);
3082
3083                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3084                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3085                                 int tsflags;
3086
3087                                 tsflags = ((skb->nh.iph->ihl - 5) +
3088                                            (tcp_opt_len >> 2));
3089                                 mss |= (tsflags << 11);
3090                         }
3091                 } else {
3092                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3093                                 int tsflags;
3094
3095                                 tsflags = ((skb->nh.iph->ihl - 5) +
3096                                            (tcp_opt_len >> 2));
3097                                 base_flags |= tsflags << 12;
3098                         }
3099                 }
3100         }
3101 #else
3102         mss = 0;
3103 #endif
3104 #if TG3_VLAN_TAG_USED
3105         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3106                 base_flags |= (TXD_FLAG_VLAN |
3107                                (vlan_tx_tag_get(skb) << 16));
3108 #endif
3109
3110         /* Queue skb data, a.k.a. the main skb fragment. */
3111         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3112
3113         tp->tx_buffers[entry].skb = skb;
3114         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3115
3116         tg3_set_txd(tp, entry, mapping, len, base_flags,
3117                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3118
3119         entry = NEXT_TX(entry);
3120
3121         /* Now loop through additional data fragments, and queue them. */
3122         if (skb_shinfo(skb)->nr_frags > 0) {
3123                 unsigned int i, last;
3124
3125                 last = skb_shinfo(skb)->nr_frags - 1;
3126                 for (i = 0; i <= last; i++) {
3127                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3128
3129
3130                         len = frag->size;
3131                         mapping = pci_map_page(tp->pdev,
3132                                                frag->page,
3133                                                frag->page_offset,
3134                                                len, PCI_DMA_TODEVICE);
3135
3136                         tp->tx_buffers[entry].skb = NULL;
3137                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3138
3139                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3140                                 tg3_set_txd(tp, entry, mapping, len,
3141                                             base_flags, (i == last)|(mss << 1));
3142                         else
3143                                 tg3_set_txd(tp, entry, mapping, len,
3144                                             base_flags, (i == last));
3145
3146                         entry = NEXT_TX(entry);
3147                 }
3148         }
3149
3150         /* Packets are ready; update the Tx producer index locally and on the card.
3151          * We know this is not a 5700 (by virtue of not being a chip
3152          * requiring the 4GB overflow workaround) so we can safely omit
3153          * the double-write bug tests.
3154          */
3155         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3156                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
3157                               TG3_64BIT_REG_LOW), entry);
3158         } else {
3159                 /* First, make sure tg3 sees last descriptor fully
3160                  * in SRAM.
3161                  */
3162                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3163                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
3164                              TG3_64BIT_REG_LOW);
3165
3166                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3167                               TG3_64BIT_REG_LOW), entry);
3168         }
3169
3170         tp->tx_prod = entry;
3171         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3172                 netif_stop_queue(dev);
3173
3174         spin_unlock_irqrestore(&tp->tx_lock, flags);
3175
3176         dev->trans_start = jiffies;
3177
3178         return 0;
3179 }
3180
3181 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3182                                int new_mtu)
3183 {
3184         dev->mtu = new_mtu;
3185
3186         if (new_mtu > ETH_DATA_LEN)
3187                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3188         else
3189                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3190 }
3191
3192 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3193 {
3194         struct tg3 *tp = netdev_priv(dev);
3195
3196         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3197                 return -EINVAL;
3198
3199         if (!netif_running(dev)) {
3200                 /* We'll just catch it later when the
3201                  * device is brought up.
3202                  */
3203                 tg3_set_mtu(dev, tp, new_mtu);
3204                 return 0;
3205         }
3206
3207         tg3_netif_stop(tp);
3208         spin_lock_irq(&tp->lock);
3209         spin_lock(&tp->tx_lock);
3210
3211         tg3_halt(tp);
3212
3213         tg3_set_mtu(dev, tp, new_mtu);
3214
3215         tg3_init_hw(tp);
3216
3217         spin_unlock(&tp->tx_lock);
3218         spin_unlock_irq(&tp->lock);
3219         tg3_netif_start(tp);
3220
3221         return 0;
3222 }
3223
3224 /* Free up pending packets in all rx/tx rings.
3225  *
3226  * The chip has been shut down and the driver detached from
3227  * the networking stack, so no interrupts or new tx packets will
3228  * end up in the driver.  tp->{tx,}lock is not held and we are not
3229  * in an interrupt context and thus may sleep.
3230  */
3231 static void tg3_free_rings(struct tg3 *tp)
3232 {
3233         struct ring_info *rxp;
3234         int i;
3235
3236         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3237                 rxp = &tp->rx_std_buffers[i];
3238
3239                 if (rxp->skb == NULL)
3240                         continue;
3241                 pci_unmap_single(tp->pdev,
3242                                  pci_unmap_addr(rxp, mapping),
3243                                  RX_PKT_BUF_SZ - tp->rx_offset,
3244                                  PCI_DMA_FROMDEVICE);
3245                 dev_kfree_skb_any(rxp->skb);
3246                 rxp->skb = NULL;
3247         }
3248
3249         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3250                 rxp = &tp->rx_jumbo_buffers[i];
3251
3252                 if (rxp->skb == NULL)
3253                         continue;
3254                 pci_unmap_single(tp->pdev,
3255                                  pci_unmap_addr(rxp, mapping),
3256                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3257                                  PCI_DMA_FROMDEVICE);
3258                 dev_kfree_skb_any(rxp->skb);
3259                 rxp->skb = NULL;
3260         }
3261
3262         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3263                 struct tx_ring_info *txp;
3264                 struct sk_buff *skb;
3265                 int j;
3266
3267                 txp = &tp->tx_buffers[i];
3268                 skb = txp->skb;
3269
3270                 if (skb == NULL) {
3271                         i++;
3272                         continue;
3273                 }
3274
3275                 pci_unmap_single(tp->pdev,
3276                                  pci_unmap_addr(txp, mapping),
3277                                  skb_headlen(skb),
3278                                  PCI_DMA_TODEVICE);
3279                 txp->skb = NULL;
3280
3281                 i++;
3282
3283                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3284                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3285                         pci_unmap_page(tp->pdev,
3286                                        pci_unmap_addr(txp, mapping),
3287                                        skb_shinfo(skb)->frags[j].size,
3288                                        PCI_DMA_TODEVICE);
3289                         i++;
3290                 }
3291
3292                 dev_kfree_skb_any(skb);
3293         }
3294 }
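/* Note on the TX loop above: a transmitted skb occupies 1 + nr_frags
 * consecutive ring entries, but only the first entry keeps the skb pointer.
 * The loop therefore unmaps the head with pci_unmap_single() using
 * skb_headlen(), unmaps each page fragment with pci_unmap_page(), and frees
 * the skb exactly once at the end.
 */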
3295
3296 /* Initialize tx/rx rings for packet processing.
3297  *
3298  * The chip has been shut down and the driver detached from
3299  * the networking stack, so no interrupts or new tx packets will
3300  * end up in the driver.  tp->{tx,}lock are held and thus
3301  * we may not sleep.
3302  */
3303 static void tg3_init_rings(struct tg3 *tp)
3304 {
3305         unsigned long start, end;
3306         u32 i;
3307
3308         /* Free up all the SKBs. */
3309         tg3_free_rings(tp);
3310
3311         /* Zero out all descriptors. */
3312         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3313         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3314         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3315
3316         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3317                 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3318         } else {
3319                 start = (tp->regs +
3320                          NIC_SRAM_WIN_BASE +
3321                          NIC_SRAM_TX_BUFFER_DESC);
3322                 end = start + TG3_TX_RING_BYTES;
3323                 while (start < end) {
3324                         writel(0, start);
3325                         start += 4;
3326                 }
3327                 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3328                         tp->tx_buffers[i].prev_vlan_tag = 0;
3329         }
3330
3331         /* Initialize invariants of the rings; we only set this
3332          * stuff once.  This works because the card does not
3333          * write into the rx buffer posting rings.
3334          */
3335         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3336                 struct tg3_rx_buffer_desc *rxd;
3337
3338                 rxd = &tp->rx_std[i];
3339                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3340                         << RXD_LEN_SHIFT;
3341                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3342                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3343                                (i << RXD_OPAQUE_INDEX_SHIFT));
3344         }
3345
3346         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3347                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3348                         struct tg3_rx_buffer_desc *rxd;
3349
3350                         rxd = &tp->rx_jumbo[i];
3351                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3352                                 << RXD_LEN_SHIFT;
3353                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3354                                 RXD_FLAG_JUMBO;
3355                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3356                                (i << RXD_OPAQUE_INDEX_SHIFT));
3357                 }
3358         }
3359
3360         /* Now allocate fresh SKBs for each rx ring. */
3361         for (i = 0; i < tp->rx_pending; i++) {
3362                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3363                                      -1, i) < 0)
3364                         break;
3365         }
3366
3367         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3368                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3369                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3370                                              -1, i) < 0)
3371                                 break;
3372                 }
3373         }
3374 }
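/* Illustration: after tg3_init_rings() a standard RX producer descriptor i
 * holds only its invariants, roughly
 *
 *     rxd->idx_len    = (usable buffer length) << RXD_LEN_SHIFT;
 *     rxd->type_flags = RXD_FLAG_END << RXD_FLAGS_SHIFT;
 *     rxd->opaque     = RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT);
 *
 * The DMA address fields are presumably filled in later by
 * tg3_alloc_rx_skb(); since the chip never writes into these posting rings,
 * the invariants only need to be set once here.
 */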
3375
3376 /*
3377  * Must not be invoked with interrupt sources disabled and
3378  * the hardware shut down.
3379  */
3380 static void tg3_free_consistent(struct tg3 *tp)
3381 {
3382         if (tp->rx_std_buffers) {
3383                 kfree(tp->rx_std_buffers);
3384                 tp->rx_std_buffers = NULL;
3385         }
3386         if (tp->rx_std) {
3387                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3388                                     tp->rx_std, tp->rx_std_mapping);
3389                 tp->rx_std = NULL;
3390         }
3391         if (tp->rx_jumbo) {
3392                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3393                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3394                 tp->rx_jumbo = NULL;
3395         }
3396         if (tp->rx_rcb) {
3397                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3398                                     tp->rx_rcb, tp->rx_rcb_mapping);
3399                 tp->rx_rcb = NULL;
3400         }
3401         if (tp->tx_ring) {
3402                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3403                         tp->tx_ring, tp->tx_desc_mapping);
3404                 tp->tx_ring = NULL;
3405         }
3406         if (tp->hw_status) {
3407                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3408                                     tp->hw_status, tp->status_mapping);
3409                 tp->hw_status = NULL;
3410         }
3411         if (tp->hw_stats) {
3412                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3413                                     tp->hw_stats, tp->stats_mapping);
3414                 tp->hw_stats = NULL;
3415         }
3416 }
3417
3418 /*
3419  * Must not be invoked with interrupt sources disabled and
3420  * the hardware shut down.  Can sleep.
3421  */
3422 static int tg3_alloc_consistent(struct tg3 *tp)
3423 {
3424         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3425                                       (TG3_RX_RING_SIZE +
3426                                        TG3_RX_JUMBO_RING_SIZE)) +
3427                                      (sizeof(struct tx_ring_info) *
3428                                       TG3_TX_RING_SIZE),
3429                                      GFP_KERNEL);
3430         if (!tp->rx_std_buffers)
3431                 return -ENOMEM;
3432
3433         memset(tp->rx_std_buffers, 0,
3434                (sizeof(struct ring_info) *
3435                 (TG3_RX_RING_SIZE +
3436                  TG3_RX_JUMBO_RING_SIZE)) +
3437                (sizeof(struct tx_ring_info) *
3438                 TG3_TX_RING_SIZE));
3439
3440         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3441         tp->tx_buffers = (struct tx_ring_info *)
3442                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3443
3444         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3445                                           &tp->rx_std_mapping);
3446         if (!tp->rx_std)
3447                 goto err_out;
3448
3449         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3450                                             &tp->rx_jumbo_mapping);
3451
3452         if (!tp->rx_jumbo)
3453                 goto err_out;
3454
3455         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3456                                           &tp->rx_rcb_mapping);
3457         if (!tp->rx_rcb)
3458                 goto err_out;
3459
3460         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3461                 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3462                                                    &tp->tx_desc_mapping);
3463                 if (!tp->tx_ring)
3464                         goto err_out;
3465         } else {
3466                 tp->tx_ring = NULL;
3467                 tp->tx_desc_mapping = 0;
3468         }
3469
3470         tp->hw_status = pci_alloc_consistent(tp->pdev,
3471                                              TG3_HW_STATUS_SIZE,
3472                                              &tp->status_mapping);
3473         if (!tp->hw_status)
3474                 goto err_out;
3475
3476         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3477                                             sizeof(struct tg3_hw_stats),
3478                                             &tp->stats_mapping);
3479         if (!tp->hw_stats)
3480                 goto err_out;
3481
3482         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3483         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3484
3485         return 0;
3486
3487 err_out:
3488         tg3_free_consistent(tp);
3489         return -ENOMEM;
3490 }
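/* Memory-layout note for the allocation above: the single kmalloc() is
 * carved into three consecutive arrays,
 *
 *     tp->rx_std_buffers   TG3_RX_RING_SIZE       entries of struct ring_info
 *     tp->rx_jumbo_buffers TG3_RX_JUMBO_RING_SIZE entries of struct ring_info
 *     tp->tx_buffers       TG3_TX_RING_SIZE       entries of struct tx_ring_info
 *
 * which is why tg3_free_consistent() only kfree()s tp->rx_std_buffers.  The
 * descriptor rings and the status/statistics blocks live in separate
 * DMA-coherent pci_alloc_consistent() allocations.
 */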
3491
3492 #define MAX_WAIT_CNT 1000
3493
3494 /* To stop a block, clear the enable bit and poll till it
3495  * clears.  tp->lock is held.
3496  */
3497 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3498 {
3499         unsigned int i;
3500         u32 val;
3501
3502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3503             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3504                 switch (ofs) {
3505                 case RCVLSC_MODE:
3506                 case DMAC_MODE:
3507                 case MBFREE_MODE:
3508                 case BUFMGR_MODE:
3509                 case MEMARB_MODE:
3510                         /* We can't enable/disable these bits of the
3511                          * 5705/5750, just say success.
3512                          */
3513                         return 0;
3514
3515                 default:
3516                         break;
3517                 }
3518         }
3519
3520         val = tr32(ofs);
3521         val &= ~enable_bit;
3522         tw32_f(ofs, val);
3523
3524         for (i = 0; i < MAX_WAIT_CNT; i++) {
3525                 udelay(100);
3526                 val = tr32(ofs);
3527                 if ((val & enable_bit) == 0)
3528                         break;
3529         }
3530
3531         if (i == MAX_WAIT_CNT) {
3532                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3533                        "ofs=%lx enable_bit=%x\n",
3534                        ofs, enable_bit);
3535                 return -ENODEV;
3536         }
3537
3538         return 0;
3539 }
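/* Timing note: the loop above polls the enable bit for at most
 * MAX_WAIT_CNT * 100us = 100ms before declaring the block stuck.  On
 * 5705/5750 several blocks cannot be individually disabled at all, which is
 * why those offsets return success immediately.
 */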
3540
3541 /* tp->lock is held. */
3542 static int tg3_abort_hw(struct tg3 *tp)
3543 {
3544         int i, err;
3545
3546         tg3_disable_ints(tp);
3547
3548         tp->rx_mode &= ~RX_MODE_ENABLE;
3549         tw32_f(MAC_RX_MODE, tp->rx_mode);
3550         udelay(10);
3551
3552         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3553         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3554         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3555         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3556         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3557         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3558
3559         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3560         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3561         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3562         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3563         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3564         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3565         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3566         if (err)
3567                 goto out;
3568
3569         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3570         tw32_f(MAC_MODE, tp->mac_mode);
3571         udelay(40);
3572
3573         tp->tx_mode &= ~TX_MODE_ENABLE;
3574         tw32_f(MAC_TX_MODE, tp->tx_mode);
3575
3576         for (i = 0; i < MAX_WAIT_CNT; i++) {
3577                 udelay(100);
3578                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3579                         break;
3580         }
3581         if (i >= MAX_WAIT_CNT) {
3582                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3583                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3584                        tp->dev->name, tr32(MAC_TX_MODE));
3585                 return -ENODEV;
3586         }
3587
3588         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3589         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3590         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3591
3592         tw32(FTQ_RESET, 0xffffffff);
3593         tw32(FTQ_RESET, 0x00000000);
3594
3595         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3596         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3597         if (err)
3598                 goto out;
3599
3600         if (tp->hw_status)
3601                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3602         if (tp->hw_stats)
3603                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3604
3605 out:
3606         return err;
3607 }
3608
3609 /* tp->lock is held. */
3610 static int tg3_nvram_lock(struct tg3 *tp)
3611 {
3612         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3613                 int i;
3614
3615                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3616                 for (i = 0; i < 8000; i++) {
3617                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3618                                 break;
3619                         udelay(20);
3620                 }
3621                 if (i == 8000)
3622                         return -ENODEV;
3623         }
3624         return 0;
3625 }
3626
3627 /* tp->lock is held. */
3628 static void tg3_nvram_unlock(struct tg3 *tp)
3629 {
3630         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3631                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3632 }
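/* Usage sketch (hypothetical caller): NVRAM accesses elsewhere in the
 * driver are expected to bracket the hardware arbitration like
 *
 *     if (tg3_nvram_lock(tp) == 0) {
 *             ... access NVRAM through the NVRAM_* registers ...
 *             tg3_nvram_unlock(tp);
 *     }
 *
 * tg3_nvram_lock() requests grant 1 on the software arbiter and polls up to
 * 8000 * 20us = 160ms for SWARB_GNT1; on chips without TG3_FLAG_NVRAM both
 * helpers are no-ops.
 */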
3633
3634 /* tp->lock is held. */
3635 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3636 {
3637         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3638                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3639
3640         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3641                 switch (kind) {
3642                 case RESET_KIND_INIT:
3643                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3644                                       DRV_STATE_START);
3645                         break;
3646
3647                 case RESET_KIND_SHUTDOWN:
3648                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3649                                       DRV_STATE_UNLOAD);
3650                         break;
3651
3652                 case RESET_KIND_SUSPEND:
3653                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3654                                       DRV_STATE_SUSPEND);
3655                         break;
3656
3657                 default:
3658                         break;
3659                 }
3660         }
3661 }
3662
3663 /* tp->lock is held. */
3664 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3665 {
3666         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3667                 switch (kind) {
3668                 case RESET_KIND_INIT:
3669                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3670                                       DRV_STATE_START_DONE);
3671                         break;
3672
3673                 case RESET_KIND_SHUTDOWN:
3674                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3675                                       DRV_STATE_UNLOAD_DONE);
3676                         break;
3677
3678                 default:
3679                         break;
3680                 }
3681         }
3682 }
3683
3684 /* tp->lock is held. */
3685 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3686 {
3687         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3688                 switch (kind) {
3689                 case RESET_KIND_INIT:
3690                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3691                                       DRV_STATE_START);
3692                         break;
3693
3694                 case RESET_KIND_SHUTDOWN:
3695                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3696                                       DRV_STATE_UNLOAD);
3697                         break;
3698
3699                 case RESET_KIND_SUSPEND:
3700                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3701                                       DRV_STATE_SUSPEND);
3702                         break;
3703
3704                 default:
3705                         break;
3706                 }
3707         }
3708 }
3709
3710 /* tp->lock is held. */
3711 static int tg3_chip_reset(struct tg3 *tp)
3712 {
3713         u32 val;
3714         u32 flags_save;
3715         int i;
3716
3717         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704))
3718                 tg3_nvram_lock(tp);
3719
3720         /*
3721          * We must avoid the readl() that normally takes place.
3722          * It can lock up machines, cause machine checks, and trigger
3723          * other fun things.  So, temporarily disable the 5701
3724          * hardware workaround while we do the reset.
3725          */
3726         flags_save = tp->tg3_flags;
3727         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3728
3729         /* do the reset */
3730         val = GRC_MISC_CFG_CORECLK_RESET;
3731
3732         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3733                 if (tr32(0x7e2c) == 0x60) {
3734                         tw32(0x7e2c, 0x20);
3735                 }
3736                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3737                         tw32(GRC_MISC_CFG, (1 << 29));
3738                         val |= (1 << 29);
3739                 }
3740         }
3741
3742         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3743             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3744                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3745         tw32(GRC_MISC_CFG, val);
3746
3747         /* restore 5701 hardware bug workaround flag */
3748         tp->tg3_flags = flags_save;
3749
3750         /* Flush PCI posted writes.  The normal MMIO registers
3751          * are inaccessible at this time, so this is the only
3752          * way to do this reliably.  I tried to use indirect
3753          * register read/write but this upset some 5701 variants.
3754          */
3755         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3756
3757         udelay(120);
3758
3759         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3760                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3761                         int i;
3762                         u32 cfg_val;
3763
3764                         /* Wait for link training to complete.  */
3765                         for (i = 0; i < 5000; i++)
3766                                 udelay(100);
3767
3768                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3769                         pci_write_config_dword(tp->pdev, 0xc4,
3770                                                cfg_val | (1 << 15));
3771                 }
3772                 /* Set PCIE max payload size and clear error status.  */
3773                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3774         }
3775
3776         /* Re-enable indirect register accesses. */
3777         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3778                                tp->misc_host_ctrl);
3779
3780         /* Set MAX PCI retry to zero. */
3781         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3782         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3783             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3784                 val |= PCISTATE_RETRY_SAME_DMA;
3785         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3786
3787         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3788
3789         /* Make sure PCI-X relaxed ordering bit is clear. */
3790         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3791         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3792         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3793
3794         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3795
3796         tw32(GRC_MODE, tp->grc_mode);
3797
3798         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3799                 u32 val = tr32(0xc4);
3800
3801                 tw32(0xc4, val | (1 << 15));
3802         }
3803
3804         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3805             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3806                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3807                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3808                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3809                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3810         }
3811
3812         if (tp->phy_id == PHY_ID_SERDES) {
3813                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3814                 tw32_f(MAC_MODE, tp->mac_mode);
3815         } else
3816                 tw32_f(MAC_MODE, 0);
3817         udelay(40);
3818
3819         /* Wait for firmware initialization to complete. */
3820         for (i = 0; i < 100000; i++) {
3821                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3822                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3823                         break;
3824                 udelay(10);
3825         }
3826         if (i >= 100000 &&
3827             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3828                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3829                        "firmware will not restart magic=%08x\n",
3830                        tp->dev->name, val);
3831                 return -ENODEV;
3832         }
3833
3834         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3835             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3836                 u32 val = tr32(0x7c00);
3837
3838                 tw32(0x7c00, val | (1 << 25));
3839         }
3840
3841         /* Reprobe ASF enable state.  */
3842         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3843         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3844         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3845         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3846                 u32 nic_cfg;
3847
3848                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3849                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3850                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3851                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3852                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3853                 }
3854         }
3855
3856         return 0;
3857 }
3858
3859 /* tp->lock is held. */
3860 static void tg3_stop_fw(struct tg3 *tp)
3861 {
3862         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3863                 u32 val;
3864                 int i;
3865
3866                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3867                 val = tr32(GRC_RX_CPU_EVENT);
3868                 val |= (1 << 14);
3869                 tw32(GRC_RX_CPU_EVENT, val);
3870
3871                 /* Wait for RX cpu to ACK the event.  */
3872                 for (i = 0; i < 100; i++) {
3873                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3874                                 break;
3875                         udelay(1);
3876                 }
3877         }
3878 }
3879
3880 /* tp->lock is held. */
3881 static int tg3_halt(struct tg3 *tp)
3882 {
3883         int err;
3884
3885         tg3_stop_fw(tp);
3886
3887         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3888
3889         tg3_abort_hw(tp);
3890         err = tg3_chip_reset(tp);
3891
3892         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3893         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3894
3895         if (err)
3896                 return err;
3897
3898         return 0;
3899 }
3900
3901 #define TG3_FW_RELEASE_MAJOR    0x0
3902 #define TG3_FW_RELASE_MINOR     0x0
3903 #define TG3_FW_RELEASE_FIX      0x0
3904 #define TG3_FW_START_ADDR       0x08000000
3905 #define TG3_FW_TEXT_ADDR        0x08000000
3906 #define TG3_FW_TEXT_LEN         0x9c0
3907 #define TG3_FW_RODATA_ADDR      0x080009c0
3908 #define TG3_FW_RODATA_LEN       0x60
3909 #define TG3_FW_DATA_ADDR        0x08000a40
3910 #define TG3_FW_DATA_LEN         0x20
3911 #define TG3_FW_SBSS_ADDR        0x08000a60
3912 #define TG3_FW_SBSS_LEN         0xc
3913 #define TG3_FW_BSS_ADDR         0x08000a70
3914 #define TG3_FW_BSS_LEN          0x10
3915
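/* Layout note: the arrays below are the RX/TX CPU firmware patch loaded by
 * tg3_load_5701_a0_firmware_fix(), laid out according to the defines above:
 * .text is 0x9c0 bytes at 0x08000000, .rodata 0x60 bytes at 0x080009c0, and
 * .data 0x20 bytes at 0x08000a40 (all zeros, so omitted).  When loading,
 * only the low 16 bits of each base address are used as the offset into the
 * per-CPU scratch memory.
 */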
3916 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3917         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3918         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3919         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3920         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3921         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3922         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3923         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3924         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3925         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3926         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3927         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3928         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3929         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3930         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3931         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3932         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3933         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3934         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3935         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3936         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3937         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3938         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3939         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3940         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3941         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3942         0, 0, 0, 0, 0, 0,
3943         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3944         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3945         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3946         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3947         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3948         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3949         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3950         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3951         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3952         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3953         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3954         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3955         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3956         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3957         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3958         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3959         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3960         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3961         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3962         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3963         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3964         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3965         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3966         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3967         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3968         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3969         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3970         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3971         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3972         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3973         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3974         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3975         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3976         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3977         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3978         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3979         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3980         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3981         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3982         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3983         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3984         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3985         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3986         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3987         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3988         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3989         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3990         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3991         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3992         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3993         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3994         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3995         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3996         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3997         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3998         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3999         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4000         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4001         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4002         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4003         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4004         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4005         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4006         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4007         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4008 };
4009
4010 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4011         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4012         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4013         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4014         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4015         0x00000000
4016 };
4017
4018 #if 0 /* All zeros, don't eat up space with it. */
4019 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4020         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4021         0x00000000, 0x00000000, 0x00000000, 0x00000000
4022 };
4023 #endif
4024
4025 #define RX_CPU_SCRATCH_BASE     0x30000
4026 #define RX_CPU_SCRATCH_SIZE     0x04000
4027 #define TX_CPU_SCRATCH_BASE     0x34000
4028 #define TX_CPU_SCRATCH_SIZE     0x04000
4029
4030 /* tp->lock is held. */
4031 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4032 {
4033         int i;
4034
4035         if (offset == TX_CPU_BASE &&
4036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4037                 BUG();
4038
4039         if (offset == RX_CPU_BASE) {
4040                 for (i = 0; i < 10000; i++) {
4041                         tw32(offset + CPU_STATE, 0xffffffff);
4042                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4043                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4044                                 break;
4045                 }
4046
4047                 tw32(offset + CPU_STATE, 0xffffffff);
4048                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4049                 udelay(10);
4050         } else {
4051                 for (i = 0; i < 10000; i++) {
4052                         tw32(offset + CPU_STATE, 0xffffffff);
4053                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4054                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4055                                 break;
4056                 }
4057         }
4058
4059         if (i >= 10000) {
4060                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4061                        "and %s CPU\n",
4062                        tp->dev->name,
4063                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4064                 return -ENODEV;
4065         }
4066         return 0;
4067 }
4068
4069 struct fw_info {
4070         unsigned int text_base;
4071         unsigned int text_len;
4072         u32 *text_data;
4073         unsigned int rodata_base;
4074         unsigned int rodata_len;
4075         u32 *rodata_data;
4076         unsigned int data_base;
4077         unsigned int data_len;
4078         u32 *data_data;
4079 };
4080
4081 /* tp->lock is held. */
4082 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4083                                  int cpu_scratch_size, struct fw_info *info)
4084 {
4085         int err, i;
4086         u32 orig_tg3_flags = tp->tg3_flags;
4087         void (*write_op)(struct tg3 *, u32, u32);
4088
4089         if (cpu_base == TX_CPU_BASE &&
4090             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4091                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4092                        "TX CPU firmware on %s, which is a 5705.\n",
4093                        tp->dev->name);
4094                 return -EINVAL;
4095         }
4096
4097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4098                 write_op = tg3_write_mem;
4099         else
4100                 write_op = tg3_write_indirect_reg32;
4101
4102         /* Force use of PCI config space for indirect register
4103          * write calls.
4104          */
4105         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4106
4107         err = tg3_halt_cpu(tp, cpu_base);
4108         if (err)
4109                 goto out;
4110
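             /* Zero the whole scratch window, then copy each section of
              * the image to its offset within the window (the low 16 bits
              * of the section's link address).  A NULL section pointer
              * writes zeros.
              */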
4111         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4112                 write_op(tp, cpu_scratch_base + i, 0);
4113         tw32(cpu_base + CPU_STATE, 0xffffffff);
4114         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4115         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4116                 write_op(tp, (cpu_scratch_base +
4117                               (info->text_base & 0xffff) +
4118                               (i * sizeof(u32))),
4119                          (info->text_data ?
4120                           info->text_data[i] : 0));
4121         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4122                 write_op(tp, (cpu_scratch_base +
4123                               (info->rodata_base & 0xffff) +
4124                               (i * sizeof(u32))),
4125                          (info->rodata_data ?
4126                           info->rodata_data[i] : 0));
4127         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4128                 write_op(tp, (cpu_scratch_base +
4129                               (info->data_base & 0xffff) +
4130                               (i * sizeof(u32))),
4131                          (info->data_data ?
4132                           info->data_data[i] : 0));
4133
4134         err = 0;
4135
4136 out:
4137         tp->tg3_flags = orig_tg3_flags;
4138         return err;
4139 }
4140
4141 /* tp->lock is held. */
4142 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4143 {
4144         struct fw_info info;
4145         int err, i;
4146
4147         info.text_base = TG3_FW_TEXT_ADDR;
4148         info.text_len = TG3_FW_TEXT_LEN;
4149         info.text_data = &tg3FwText[0];
4150         info.rodata_base = TG3_FW_RODATA_ADDR;
4151         info.rodata_len = TG3_FW_RODATA_LEN;
4152         info.rodata_data = &tg3FwRodata[0];
4153         info.data_base = TG3_FW_DATA_ADDR;
4154         info.data_len = TG3_FW_DATA_LEN;
4155         info.data_data = NULL;
4156
4157         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4158                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4159                                     &info);
4160         if (err)
4161                 return err;
4162
4163         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4164                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4165                                     &info);
4166         if (err)
4167                 return err;
4168
4169         /* Now start up only the RX CPU. */
4170         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4171         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4172
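             /* Verify that the PC latched the firmware entry point,
              * retrying the halt/set-PC sequence a few times before
              * giving up.
              */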
4173         for (i = 0; i < 5; i++) {
4174                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4175                         break;
4176                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4177                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4178                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4179                 udelay(1000);
4180         }
4181         if (i >= 5) {
4182                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX "
4183                        "CPU PC for %s: is %08x, should be %08x\n",
4184                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4185                        TG3_FW_TEXT_ADDR);
4186                 return -ENODEV;
4187         }
4188         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4189         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4190
4191         return 0;
4192 }
4193
4194 #if TG3_TSO_SUPPORT != 0
4195
4196 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4197 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4198 #define TG3_TSO_FW_RELEASE_FIX          0x0
4199 #define TG3_TSO_FW_START_ADDR           0x08000000
4200 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4201 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4202 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4203 #define TG3_TSO_FW_RODATA_LEN           0x60
4204 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4205 #define TG3_TSO_FW_DATA_LEN             0x30
4206 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4207 #define TG3_TSO_FW_SBSS_LEN             0x2c
4208 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4209 #define TG3_TSO_FW_BSS_LEN              0x894
4210
4211 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4212         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4213         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4214         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4215         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4216         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4217         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4218         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4219         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4220         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4221         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4222         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4223         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4224         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4225         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4226         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4227         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4228         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4229         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4230         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4231         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4232         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4233         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4234         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4235         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4236         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4237         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4238         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4239         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4240         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4241         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4242         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4243         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4244         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4245         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4246         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4247         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4248         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4249         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4250         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4251         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4252         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4253         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4254         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4255         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4256         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4257         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4258         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4259         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4260         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4261         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4262         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4263         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4264         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4265         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4266         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4267         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4268         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4269         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4270         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4271         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4272         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4273         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4274         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4275         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4276         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4277         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4278         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4279         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4280         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4281         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4282         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4283         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4284         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4285         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4286         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4287         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4288         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4289         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4290         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4291         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4292         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4293         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4294         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4295         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4296         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4297         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4298         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4299         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4300         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4301         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4302         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4303         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4304         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4305         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4306         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4307         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4308         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4309         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4310         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4311         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4312         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4313         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4314         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4315         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4316         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4317         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4318         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4319         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4320         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4321         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4322         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4323         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4324         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4325         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4326         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4327         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4328         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4329         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4330         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4331         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4332         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4333         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4334         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4335         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4336         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4337         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4338         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4339         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4340         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4341         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4342         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4343         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4344         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4345         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4346         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4347         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4348         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4349         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4350         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4351         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4352         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4353         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4354         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4355         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4356         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4357         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4358         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4359         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4360         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4361         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4362         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4363         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4364         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4365         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4366         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4367         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4368         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4369         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4370         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4371         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4372         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4373         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4374         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4375         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4376         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4377         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4378         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4379         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4380         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4381         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4382         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4383         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4384         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4385         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4386         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4387         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4388         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4389         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4390         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4391         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4392         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4393         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4394         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4395         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4396         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4397         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4398         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4399         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4400         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4401         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4402         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4403         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4404         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4405         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4406         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4407         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4408         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4409         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4410         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4411         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4412         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4413         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4414         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4415         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4416         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4417         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4418         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4419         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4420         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4421         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4422         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4423         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4424         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4425         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4426         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4427         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4428         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4429         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4430         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4431         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4432         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4433         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4434         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4435         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4436         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4437         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4438         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4439         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4440         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4441         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4442         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4443         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4444         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4445         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4446         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4447         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4448         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4449         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4450         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4451         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4452         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4453         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4454         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4455         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4456         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4457         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4458         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4459         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4460         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4461         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4462         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4463         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4464         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4465         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4466         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4467         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4468         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4469         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4470         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4471         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4472         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4473         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4474         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4475         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4476         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4477         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4478         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4479         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4480         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4481         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4482         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4483         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4484         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4485         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4486         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4487         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4488         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4489         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4490         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4491         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4492         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4493         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4494         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4495         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4496 };
4497
4498 static u32 tg3TsoFwRodata[] = {
4499         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4500         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4501         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4502         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4503         0x00000000,
4504 };
4505
4506 static u32 tg3TsoFwData[] = {
4507         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4508         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4509         0x00000000,
4510 };
4511
4512 /* 5705 needs a special version of the TSO firmware.  */
4513 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4514 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4515 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4516 #define TG3_TSO5_FW_START_ADDR          0x00010000
4517 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4518 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4519 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4520 #define TG3_TSO5_FW_RODATA_LEN          0x50
4521 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4522 #define TG3_TSO5_FW_DATA_LEN            0x20
4523 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4524 #define TG3_TSO5_FW_SBSS_LEN            0x28
4525 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4526 #define TG3_TSO5_FW_BSS_LEN             0x88
4527
4528 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4529         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4530         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4531         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4532         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4533         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4534         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4535         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4536         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4537         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4538         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4539         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4540         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4541         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4542         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4543         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4544         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4545         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4546         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4547         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4548         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4549         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4550         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4551         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4552         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4553         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4554         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4555         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4556         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4557         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4558         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4559         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4560         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4561         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4562         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4563         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4564         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4565         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4566         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4567         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4568         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4569         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4570         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4571         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4572         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4573         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4574         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4575         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4576         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4577         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4578         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4579         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4580         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4581         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4582         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4583         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4584         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4585         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4586         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4587         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4588         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4589         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4590         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4591         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4592         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4593         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4594         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4595         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4596         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4597         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4598         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4599         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4600         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4601         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4602         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4603         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4604         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4605         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4606         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4607         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4608         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4609         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4610         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4611         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4612         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4613         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4614         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4615         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4616         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4617         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4618         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4619         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4620         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4621         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4622         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4623         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4624         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4625         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4626         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4627         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4628         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4629         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4630         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4631         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4632         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4633         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4634         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4635         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4636         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4637         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4638         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4639         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4640         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4641         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4642         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4643         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4644         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4645         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4646         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4647         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4648         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4649         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4650         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4651         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4652         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4653         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4654         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4655         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4656         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4657         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4658         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4659         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4660         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4661         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4662         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4663         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4664         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4665         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4666         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4667         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4668         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4669         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4670         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4671         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4672         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4673         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4674         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4675         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4676         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4677         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4678         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4679         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4680         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4681         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4682         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4683         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4684         0x00000000, 0x00000000, 0x00000000,
4685 };
4686
4687 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4688         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4689         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4690         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4691         0x00000000, 0x00000000, 0x00000000,
4692 };
4693
4694 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4695         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4696         0x00000000, 0x00000000, 0x00000000,
4697 };
4698
4699 /* tp->lock is held. */
4700 static int tg3_load_tso_firmware(struct tg3 *tp)
4701 {
4702         struct fw_info info;
4703         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4704         int err, i;
4705
4706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4707                 return 0;
4708
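             /* The 5705 runs a dedicated TSO image on its RX CPU, staged
              * in the mbuf pool SRAM; other TSO-capable chips load the
              * standard image into the TX CPU scratch area.
              */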
4709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4710                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4711                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4712                 info.text_data = &tg3Tso5FwText[0];
4713                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4714                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4715                 info.rodata_data = &tg3Tso5FwRodata[0];
4716                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4717                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4718                 info.data_data = &tg3Tso5FwData[0];
4719                 cpu_base = RX_CPU_BASE;
4720                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4721                 cpu_scratch_size = (info.text_len +
4722                                     info.rodata_len +
4723                                     info.data_len +
4724                                     TG3_TSO5_FW_SBSS_LEN +
4725                                     TG3_TSO5_FW_BSS_LEN);
4726         } else {
4727                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4728                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4729                 info.text_data = &tg3TsoFwText[0];
4730                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4731                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4732                 info.rodata_data = &tg3TsoFwRodata[0];
4733                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4734                 info.data_len = TG3_TSO_FW_DATA_LEN;
4735                 info.data_data = &tg3TsoFwData[0];
4736                 cpu_base = TX_CPU_BASE;
4737                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4738                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4739         }
4740
4741         err = tg3_load_firmware_cpu(tp, cpu_base,
4742                                     cpu_scratch_base, cpu_scratch_size,
4743                                     &info);
4744         if (err)
4745                 return err;
4746
4747         /* Now start up the CPU. */
4748         tw32(cpu_base + CPU_STATE, 0xffffffff);
4749         tw32_f(cpu_base + CPU_PC,    info.text_base);
4750
4751         for (i = 0; i < 5; i++) {
4752                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4753                         break;
4754                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4755                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4756                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4757                 udelay(1000);
4758         }
4759         if (i >= 5) {
4760                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to "
4761                        "set CPU PC for %s: is %08x, should be %08x\n",
4762                        tp->dev->name, tr32(cpu_base + CPU_PC),
4763                        info.text_base);
4764                 return -ENODEV;
4765         }
4766         tw32(cpu_base + CPU_STATE, 0xffffffff);
4767         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4768         return 0;
4769 }
4770
4771 #endif /* TG3_TSO_SUPPORT != 0 */
4772
4773 /* tp->lock is held. */
4774 static void __tg3_set_mac_addr(struct tg3 *tp)
4775 {
4776         u32 addr_high, addr_low;
4777         int i;
4778
4779         addr_high = ((tp->dev->dev_addr[0] << 8) |
4780                      tp->dev->dev_addr[1]);
4781         addr_low = ((tp->dev->dev_addr[2] << 24) |
4782                     (tp->dev->dev_addr[3] << 16) |
4783                     (tp->dev->dev_addr[4] <<  8) |
4784                     (tp->dev->dev_addr[5] <<  0));
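             /* Program the station address into all four MAC address slots. */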
4785         for (i = 0; i < 4; i++) {
4786                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4787                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4788         }
4789
4790         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4791             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4792             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4793                 for (i = 0; i < 12; i++) {
4794                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4795                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4796                 }
4797         }
4798
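             /* Seed the transmit backoff generator from the sum of the
              * address bytes, so different stations tend to pick
              * different backoff slots.
              */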
4799         addr_high = (tp->dev->dev_addr[0] +
4800                      tp->dev->dev_addr[1] +
4801                      tp->dev->dev_addr[2] +
4802                      tp->dev->dev_addr[3] +
4803                      tp->dev->dev_addr[4] +
4804                      tp->dev->dev_addr[5]) &
4805                 TX_BACKOFF_SEED_MASK;
4806         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4807 }
4808
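     /* Entry point for changing the interface's station address at
      * runtime; the new address is programmed into the MAC immediately.
      */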
4809 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4810 {
4811         struct tg3 *tp = netdev_priv(dev);
4812         struct sockaddr *addr = p;
4813
4814         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4815
4816         spin_lock_irq(&tp->lock);
4817         __tg3_set_mac_addr(tp);
4818         spin_unlock_irq(&tp->lock);
4819
4820         return 0;
4821 }
4822
4823 /* tp->lock is held. */
4824 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4825                            dma_addr_t mapping, u32 maxlen_flags,
4826                            u32 nic_addr)
4827 {
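             /* A BDINFO block in NIC SRAM describes one receive ring: the
              * 64-bit host DMA address of the ring, a maxlen/flags word
              * and, on chips that support it, the ring's location in NIC
              * memory.
              */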
4828         tg3_write_mem(tp,
4829                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4830                       ((u64) mapping >> 32));
4831         tg3_write_mem(tp,
4832                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4833                       ((u64) mapping & 0xffffffff));
4834         tg3_write_mem(tp,
4835                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4836                        maxlen_flags);
4837
4838         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4839                 tg3_write_mem(tp,
4840                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4841                               nic_addr);
4842 }
4843
4844 static void __tg3_set_rx_mode(struct net_device *);
4845
4846 /* tp->lock is held. */
4847 static int tg3_reset_hw(struct tg3 *tp)
4848 {
4849         u32 val, rdmac_mode;
4850         int i, err, limit;
4851
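             /* Quiesce the chip before reprogramming it: mask interrupts,
              * pause the on-chip management firmware, signal the impending
              * reset and, if the device was previously up, abort in-flight
              * DMA.
              */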
4852         tg3_disable_ints(tp);
4853
4854         tg3_stop_fw(tp);
4855
4856         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4857
4858         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4859                 err = tg3_abort_hw(tp);
4860                 if (err)
4861                         return err;
4862         }
4863
4864         err = tg3_chip_reset(tp);
4865         if (err)
4866                 return err;
4867
4868         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4869
4870         /* This works around an issue with Athlon chipsets on
4871          * B3 tigon3 silicon.  This bit has no effect on any
4872          * other revision.  But do not set this on PCI Express
4873          * chips.
4874          */
4875         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4876                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4877         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4878
4879         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4880             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4881                 val = tr32(TG3PCI_PCISTATE);
4882                 val |= PCISTATE_RETRY_SAME_DMA;
4883                 tw32(TG3PCI_PCISTATE, val);
4884         }
4885
4886         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4887                 /* Enable some hw fixes.  */
4888                 val = tr32(TG3PCI_MSI_DATA);
4889                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4890                 tw32(TG3PCI_MSI_DATA, val);
4891         }
4892
4893         /* Descriptor ring init may make accesses to the
4894          * NIC SRAM area to setup the TX descriptors, so we
4895          * can only do this after the hardware has been
4896          * successfully reset.
4897          */
4898         tg3_init_rings(tp);
4899
4900         /* This value is determined during the probe time DMA
4901          * engine test, tg3_test_dma.
4902          */
4903         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4904
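             /* Select where send buffer descriptors live (host memory vs.
              * NIC SRAM) and how pseudo-header checksums are handled on
              * transmit and receive, based on the flags probed earlier.
              */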
4905         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4906                           GRC_MODE_4X_NIC_SEND_RINGS |
4907                           GRC_MODE_NO_TX_PHDR_CSUM |
4908                           GRC_MODE_NO_RX_PHDR_CSUM);
4909         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
4910                 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4911         else
4912                 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
4913         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4914                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4915         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4916                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4917
4918         tw32(GRC_MODE,
4919              tp->grc_mode |
4920              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4921
4922         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
4923         val = tr32(GRC_MISC_CFG);
4924         val &= ~0xff;
4925         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4926         tw32(GRC_MISC_CFG, val);
4927
4928         /* Initialize MBUF/DESC pool. */
4929         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4930                 /* Do nothing.  */
4931         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4932                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4933                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4934                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4935                 else
4936                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4937                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4938                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4939         }
4940 #if TG3_TSO_SUPPORT != 0
4941         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4942                 int fw_len;
4943
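                     /* The 5705 TSO firmware is staged at the bottom of
                      * the mbuf pool, so round its footprint up to 128
                      * bytes and shrink the pool accordingly.
                      */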
4944                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4945                           TG3_TSO5_FW_RODATA_LEN +
4946                           TG3_TSO5_FW_DATA_LEN +
4947                           TG3_TSO5_FW_SBSS_LEN +
4948                           TG3_TSO5_FW_BSS_LEN);
4949                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4950                 tw32(BUFMGR_MB_POOL_ADDR,
4951                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4952                 tw32(BUFMGR_MB_POOL_SIZE,
4953                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4954         }
4955 #endif
4956
4957         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4958                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4959                      tp->bufmgr_config.mbuf_read_dma_low_water);
4960                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4961                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4962                 tw32(BUFMGR_MB_HIGH_WATER,
4963                      tp->bufmgr_config.mbuf_high_water);
4964         } else {
4965                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4966                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4967                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4968                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4969                 tw32(BUFMGR_MB_HIGH_WATER,
4970                      tp->bufmgr_config.mbuf_high_water_jumbo);
4971         }
4972         tw32(BUFMGR_DMA_LOW_WATER,
4973              tp->bufmgr_config.dma_low_water);
4974         tw32(BUFMGR_DMA_HIGH_WATER,
4975              tp->bufmgr_config.dma_high_water);
4976
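        /* Enable the buffer manager and poll (up to ~20ms) for the enable
         * bit to stick before going any further.
         */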
4977         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4978         for (i = 0; i < 2000; i++) {
4979                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4980                         break;
4981                 udelay(10);
4982         }
4983         if (i >= 2000) {
4984                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4985                        tp->dev->name);
4986                 return -ENODEV;
4987         }
4988
4989         /* Setup replenish threshold. */
4990         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4991
4992         /* Initialize the TG3_BDINFOs at:
4993          *  RCVDBDI_STD_BD:     standard eth size rx ring
4994          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
4995          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
4996          *
4997          * like so:
4998          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
4999          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5000          *                              ring attribute flags
5001          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5002          *
5003          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5004          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5005          *
5006          * The size of each ring is fixed in the firmware, but the location is
5007          * configurable.
5008          */
5009         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5010              ((u64) tp->rx_std_mapping >> 32));
5011         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5012              ((u64) tp->rx_std_mapping & 0xffffffff));
5013         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5014              NIC_SRAM_RX_BUFFER_DESC);
5015
5016         /* Don't even try to program the JUMBO/MINI buffer descriptor
5017          * configs on the 5705/5750.
5018          */
5019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5021                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5022                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5023         } else {
5024                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5025                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5026
5027                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5028                      BDINFO_FLAGS_DISABLED);
5029
5030                 /* Setup replenish threshold. */
5031                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5032
5033                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5034                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5035                              ((u64) tp->rx_jumbo_mapping >> 32));
5036                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5037                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5038                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5039                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5040                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5041                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5042                 } else {
5043                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5044                              BDINFO_FLAGS_DISABLED);
5045                 }
5046
5047         }
5048
5049         /* There is only one send ring on 5705/5750, no need to explicitly
5050          * disable the others.
5051          */
5052         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5053             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5054                 /* Clear out send RCB ring in SRAM. */
5055                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5056                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5057                                       BDINFO_FLAGS_DISABLED);
5058         }
5059
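        /* Reset the send ring software indices and the host/NIC producer
         * mailboxes before handing the ring to the chip.
         */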
5060         tp->tx_prod = 0;
5061         tp->tx_cons = 0;
5062         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5063         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5064
5065         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
5066                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5067                                tp->tx_desc_mapping,
5068                                (TG3_TX_RING_SIZE <<
5069                                 BDINFO_FLAGS_MAXLEN_SHIFT),
5070                                NIC_SRAM_TX_BUFFER_DESC);
5071         } else {
5072                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5073                                0,
5074                                BDINFO_FLAGS_DISABLED,
5075                                NIC_SRAM_TX_BUFFER_DESC);
5076         }
5077
5078         /* There is only one receive return ring on 5705/5750, no need
5079          * to explicitly disable the others.
5080          */
5081         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5082             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5083                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5084                      i += TG3_BDINFO_SIZE) {
5085                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5086                                       BDINFO_FLAGS_DISABLED);
5087                 }
5088         }
5089
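        /* Reset the receive return ring consumer index and point its RCB
         * at the host-resident return ring.
         */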
5090         tp->rx_rcb_ptr = 0;
5091         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5092
5093         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5094                        tp->rx_rcb_mapping,
5095                        (TG3_RX_RCB_RING_SIZE(tp) <<
5096                         BDINFO_FLAGS_MAXLEN_SHIFT),
5097                        0);
5098
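        /* Publish how many standard (and, if enabled, jumbo) receive
         * buffers have been posted via the producer index mailboxes.
         */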
5099         tp->rx_std_ptr = tp->rx_pending;
5100         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5101                      tp->rx_std_ptr);
5102
5103         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5104                                                 tp->rx_jumbo_pending : 0;
5105         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5106                      tp->rx_jumbo_ptr);
5107
5108         /* Initialize MAC address and backoff seed. */
5109         __tg3_set_mac_addr(tp);
5110
5111         /* MTU + ethernet header + FCS + optional VLAN tag */
5112         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5113
5114         /* The slot time is changed by tg3_setup_phy if we
5115          * run at gigabit with half duplex.
5116          */
5117         tw32(MAC_TX_LENGTHS,
5118              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5119              (6 << TX_LENGTHS_IPG_SHIFT) |
5120              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5121
5122         /* Receive rules. */
5123         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5124         tw32(RCVLPC_CONFIG, 0x0181);
5125
5126         /* Calculate RDMAC_MODE setting early, we need it to determine
5127          * the RCVLPC_STATE_ENABLE mask.
5128          */
5129         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5130                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5131                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5132                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5133                       RDMAC_MODE_LNGREAD_ENAB);
5134         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5135                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5136         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5137              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5138             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5139                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5140                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5141                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5142                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5143                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5144                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5145                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5146                 }
5147         }
5148
5149 #if TG3_TSO_SUPPORT != 0
5150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5151                 rdmac_mode |= (1 << 27);
5152 #endif
5153
5154         /* Receive/send statistics. */
5155         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5156             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5157                 val = tr32(RCVLPC_STATS_ENABLE);
5158                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5159                 tw32(RCVLPC_STATS_ENABLE, val);
5160         } else {
5161                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5162         }
5163         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5164         tw32(SNDDATAI_STATSENAB, 0xffffff);
5165         tw32(SNDDATAI_STATSCTRL,
5166              (SNDDATAI_SCTRL_ENABLE |
5167               SNDDATAI_SCTRL_FASTUPD));
5168
5169         /* Setup host coalescing engine. */
5170         tw32(HOSTCC_MODE, 0);
5171         for (i = 0; i < 2000; i++) {
5172                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5173                         break;
5174                 udelay(10);
5175         }
5176
5177         tw32(HOSTCC_RXCOL_TICKS, 0);
5178         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5179         tw32(HOSTCC_RXMAX_FRAMES, 1);
5180         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5181         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5182             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5183                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5184                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5185         }
5186         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5187         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5188
5189         /* set status block DMA address */
5190         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5191              ((u64) tp->status_mapping >> 32));
5192         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5193              ((u64) tp->status_mapping & 0xffffffff));
5194
5195         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5196             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5197                 /* Status/statistics block address.  See tg3_timer,
5198                  * the tg3_periodic_fetch_stats call there, and
5199                  * tg3_get_stats to see how this works for 5705/5750 chips.
5200                  */
5201                 tw32(HOSTCC_STAT_COAL_TICKS,
5202                      DEFAULT_STAT_COAL_TICKS);
5203                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5204                      ((u64) tp->stats_mapping >> 32));
5205                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5206                      ((u64) tp->stats_mapping & 0xffffffff));
5207                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5208                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5209         }
5210
5211         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5212
5213         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5214         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5215         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5216             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5217                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5218
5219         /* Clear statistics/status block in chip, and status block in ram. */
5220         for (i = NIC_SRAM_STATS_BLK;
5221              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5222              i += sizeof(u32)) {
5223                 tg3_write_mem(tp, i, 0);
5224                 udelay(40);
5225         }
5226         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5227
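        /* Bring up the MAC with TX/RX statistics and the frame DMA engines
         * enabled, clearing any stale statistics state in the process.
         */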
5228         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5229                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5230         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5231         udelay(40);
5232
5233         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5235                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5236                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5237         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5238         udelay(100);
5239
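        /* Zero the interrupt mailbox and read it back to flush the posted
         * write before enabling the remaining engines.
         */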
5240         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5241         tr32(MAILBOX_INTERRUPT_0);
5242
5243         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5244             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5245                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5246                 udelay(40);
5247         }
5248
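        /* Build the write DMA engine mode word: enable plus attention on
         * the various error conditions.
         */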
5249         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5250                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5251                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5252                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5253                WDMAC_MODE_LNGREAD_ENAB);
5254
5255         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5256              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5257             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5258                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5259                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5260                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5261                         /* nothing */
5262                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5263                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5264                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5265                         val |= WDMAC_MODE_RX_ACCEL;
5266                 }
5267         }
5268
5269         tw32_f(WDMAC_MODE, val);
5270         udelay(40);
5271
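        /* On PCI-X, tune the maximum burst size (and, on the 5704, the
         * split transaction limit) in the PCI-X capability register.
         */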
5272         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5273                 val = tr32(TG3PCI_X_CAPS);
5274                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5275                         val &= ~PCIX_CAPS_BURST_MASK;
5276                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5277                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5278                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5279                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5280                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5281                                 val |= (tp->split_mode_max_reqs <<
5282                                         PCIX_CAPS_SPLIT_SHIFT);
5283                 }
5284                 tw32(TG3PCI_X_CAPS, val);
5285         }
5286
5287         tw32_f(RDMAC_MODE, rdmac_mode);
5288         udelay(40);
5289
5290         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5291         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5292             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5293                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5294         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5295         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5296         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5297         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5298         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5299 #if TG3_TSO_SUPPORT != 0
5300         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5301                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5302 #endif
5303         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5304         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5305
5306         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5307                 err = tg3_load_5701_a0_firmware_fix(tp);
5308                 if (err)
5309                         return err;
5310         }
5311
5312 #if TG3_TSO_SUPPORT != 0
5313         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5314                 err = tg3_load_tso_firmware(tp);
5315                 if (err)
5316                         return err;
5317         }
5318 #endif
5319
5320         tp->tx_mode = TX_MODE_ENABLE;
5321         tw32_f(MAC_TX_MODE, tp->tx_mode);
5322         udelay(100);
5323
5324         tp->rx_mode = RX_MODE_ENABLE;
5325         tw32_f(MAC_RX_MODE, tp->rx_mode);
5326         udelay(10);
5327
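        /* If the PHY had been placed in low-power mode, restore the link
         * parameters that were saved before powering down.
         */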
5328         if (tp->link_config.phy_is_low_power) {
5329                 tp->link_config.phy_is_low_power = 0;
5330                 tp->link_config.speed = tp->link_config.orig_speed;
5331                 tp->link_config.duplex = tp->link_config.orig_duplex;
5332                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5333         }
5334
5335         tp->mi_mode = MAC_MI_MODE_BASE;
5336         tw32_f(MAC_MI_MODE, tp->mi_mode);
5337         udelay(80);
5338
5339         tw32(MAC_LED_CTRL, tp->led_ctrl);
5340
5341         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5342         if (tp->phy_id == PHY_ID_SERDES) {
5343                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5344                 udelay(10);
5345         }
5346         tw32_f(MAC_RX_MODE, tp->rx_mode);
5347         udelay(10);
5348
5349         if (tp->phy_id == PHY_ID_SERDES) {
5350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5351                         /* Set drive transmission level to 1.2V  */
5352                         val = tr32(MAC_SERDES_CFG);
5353                         val &= 0xfffff000;
5354                         val |= 0x880;
5355                         tw32(MAC_SERDES_CFG, val);
5356                 }
5357                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5358                         tw32(MAC_SERDES_CFG, 0x616000);
5359         }
5360
5361         /* Prevent chip from dropping frames when flow control
5362          * is enabled.
5363          */
5364         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5365
5366         err = tg3_setup_phy(tp, 1);
5367         if (err)
5368                 return err;
5369
5370         if (tp->phy_id != PHY_ID_SERDES) {
5371                 u32 tmp;
5372
5373                 /* Clear CRC stats. */
5374                 tg3_readphy(tp, 0x1e, &tmp);
5375                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5376                 tg3_readphy(tp, 0x14, &tmp);
5377         }
5378
5379         __tg3_set_rx_mode(tp->dev);
5380
5381         /* Initialize receive rules. */
5382         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5383         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5384         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5385         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5386
5387         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5389                 limit = 8;
5390         else
5391                 limit = 16;
5392         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5393                 limit -= 4;
5394         switch (limit) {
5395         case 16:
5396                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5397         case 15:
5398                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5399         case 14:
5400                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5401         case 13:
5402                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5403         case 12:
5404                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5405         case 11:
5406                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5407         case 10:
5408                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5409         case 9:
5410                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5411         case 8:
5412                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5413         case 7:
5414                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5415         case 6:
5416                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5417         case 5:
5418                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5419         case 4:
5420                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5421         case 3:
5422                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5423         case 2:
5424         case 1:
5425
5426         default:
5427                 break;
5428         }
5429
5430         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5431
5432         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5433                 tg3_enable_ints(tp);
5434
5435         return 0;
5436 }
5437
5438 /* Called at device open time to get the chip ready for
5439  * packet processing.  Invoked with tp->lock held.
5440  */
5441 static int tg3_init_hw(struct tg3 *tp)
5442 {
5443         int err;
5444
5445         /* Force the chip into D0. */
5446         err = tg3_set_power_state(tp, 0);
5447         if (err)
5448                 goto out;
5449
5450         tg3_switch_clocks(tp);
5451
5452         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5453
5454         err = tg3_reset_hw(tp);
5455
5456 out:
5457         return err;
5458 }
5459
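/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) software
 * counter; unsigned wrap of the low word carries into the high word.
 */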
5460 #define TG3_STAT_ADD32(PSTAT, REG) \
5461 do {    u32 __val = tr32(REG); \
5462         (PSTAT)->low += __val; \
5463         if ((PSTAT)->low < __val) \
5464                 (PSTAT)->high += 1; \
5465 } while (0)
5466
5467 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5468 {
5469         struct tg3_hw_stats *sp = tp->hw_stats;
5470
5471         if (!netif_carrier_ok(tp->dev))
5472                 return;
5473
5474         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5475         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5476         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5477         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5478         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5479         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5480         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5481         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5482         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5483         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5484         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5485         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5486         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5487
5488         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5489         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5490         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5491         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5492         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5493         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5494         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5495         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5496         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5497         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5498         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5499         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5500         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5501         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5502 }
5503
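/* Periodic (10 per second) timer: kick the status-block handshake, fetch
 * statistics on 5705/5750, check link state once per second and emit the
 * ASF heartbeat when enabled.
 */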
5504 static void tg3_timer(unsigned long __opaque)
5505 {
5506         struct tg3 *tp = (struct tg3 *) __opaque;
5507         unsigned long flags;
5508
5509         spin_lock_irqsave(&tp->lock, flags);
5510         spin_lock(&tp->tx_lock);
5511
5512         /* All of this garbage is because, when using non-tagged
5513          * IRQ status, the mailbox/status_block protocol the chip
5514          * uses with the CPU is race prone.
5515          */
5516         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5517                 tw32(GRC_LOCAL_CTRL,
5518                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5519         } else {
5520                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5521                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5522         }
5523
5524         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5525                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5526                 spin_unlock(&tp->tx_lock);
5527                 spin_unlock_irqrestore(&tp->lock, flags);
5528                 schedule_work(&tp->reset_task);
5529                 return;
5530         }
5531
5532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5534                 tg3_periodic_fetch_stats(tp);
5535
5536         /* This part only runs once per second. */
5537         if (!--tp->timer_counter) {
5538                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5539                         u32 mac_stat;
5540                         int phy_event;
5541
5542                         mac_stat = tr32(MAC_STATUS);
5543
5544                         phy_event = 0;
5545                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5546                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5547                                         phy_event = 1;
5548                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5549                                 phy_event = 1;
5550
5551                         if (phy_event)
5552                                 tg3_setup_phy(tp, 0);
5553                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5554                         u32 mac_stat = tr32(MAC_STATUS);
5555                         int need_setup = 0;
5556
5557                         if (netif_carrier_ok(tp->dev) &&
5558                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5559                                 need_setup = 1;
5560                         }
5561                         if (! netif_carrier_ok(tp->dev) &&
5562                             (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5563                                 need_setup = 1;
5564                         }
5565                         if (need_setup) {
5566                                 tw32_f(MAC_MODE,
5567                                      (tp->mac_mode &
5568                                       ~MAC_MODE_PORT_MODE_MASK));
5569                                 udelay(40);
5570                                 tw32_f(MAC_MODE, tp->mac_mode);
5571                                 udelay(40);
5572                                 tg3_setup_phy(tp, 0);
5573                         }
5574                 }
5575
5576                 tp->timer_counter = tp->timer_multiplier;
5577         }
5578
5579         /* Heartbeat is only sent once every 120 seconds.  */
5580         if (!--tp->asf_counter) {
5581                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5582                         u32 val;
5583
5584                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5585                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5586                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5587                         val = tr32(GRC_RX_CPU_EVENT);
5588                         val |= (1 << 14);
5589                         tw32(GRC_RX_CPU_EVENT, val);
5590                 }
5591                 tp->asf_counter = tp->asf_multiplier;
5592         }
5593
5594         spin_unlock(&tp->tx_lock);
5595         spin_unlock_irqrestore(&tp->lock, flags);
5596
5597         tp->timer.expires = jiffies + tp->timer_offset;
5598         add_timer(&tp->timer);
5599 }
5600
5601 static int tg3_open(struct net_device *dev)
5602 {
5603         struct tg3 *tp = netdev_priv(dev);
5604         int err;
5605
5606         spin_lock_irq(&tp->lock);
5607         spin_lock(&tp->tx_lock);
5608
5609         tg3_disable_ints(tp);
5610         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5611
5612         spin_unlock(&tp->tx_lock);
5613         spin_unlock_irq(&tp->lock);
5614
5615         /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5616          * tp->tg3_flags is still accurate at the new location.
5617          */
5618         err = tg3_alloc_consistent(tp);
5619         if (err)
5620                 return err;
5621
5622         err = request_irq(dev->irq, tg3_interrupt,
5623                           SA_SHIRQ, dev->name, dev);
5624
5625         if (err) {
5626                 tg3_free_consistent(tp);
5627                 return err;
5628         }
5629
5630         spin_lock_irq(&tp->lock);
5631         spin_lock(&tp->tx_lock);
5632
5633         err = tg3_init_hw(tp);
5634         if (err) {
5635                 tg3_halt(tp);
5636                 tg3_free_rings(tp);
5637         } else {
5638                 tp->timer_offset = HZ / 10;
5639                 tp->timer_counter = tp->timer_multiplier = 10;
5640                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5641
5642                 init_timer(&tp->timer);
5643                 tp->timer.expires = jiffies + tp->timer_offset;
5644                 tp->timer.data = (unsigned long) tp;
5645                 tp->timer.function = tg3_timer;
5646                 add_timer(&tp->timer);
5647
5648                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5649         }
5650
5651         spin_unlock(&tp->tx_lock);
5652         spin_unlock_irq(&tp->lock);
5653
5654         if (err) {
5655                 free_irq(dev->irq, dev);
5656                 tg3_free_consistent(tp);
5657                 return err;
5658         }
5659
5660         spin_lock_irq(&tp->lock);
5661         spin_lock(&tp->tx_lock);
5662
5663         tg3_enable_ints(tp);
5664
5665         spin_unlock(&tp->tx_lock);
5666         spin_unlock_irq(&tp->lock);
5667
5668         netif_start_queue(dev);
5669
5670         return 0;
5671 }
5672
5673 #if 0
5674 /*static*/ void tg3_dump_state(struct tg3 *tp)
5675 {
5676         u32 val32, val32_2, val32_3, val32_4, val32_5;
5677         u16 val16;
5678         int i;
5679
5680         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5681         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5682         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5683                val16, val32);
5684
5685         /* MAC block */
5686         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5687                tr32(MAC_MODE), tr32(MAC_STATUS));
5688         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5689                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5690         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5691                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5692         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5693                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5694
5695         /* Send data initiator control block */
5696         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5697                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5698         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5699                tr32(SNDDATAI_STATSCTRL));
5700
5701         /* Send data completion control block */
5702         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5703
5704         /* Send BD ring selector block */
5705         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5706                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5707
5708         /* Send BD initiator control block */
5709         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5710                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5711
5712         /* Send BD completion control block */
5713         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5714
5715         /* Receive list placement control block */
5716         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5717                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5718         printk("       RCVLPC_STATSCTRL[%08x]\n",
5719                tr32(RCVLPC_STATSCTRL));
5720
5721         /* Receive data and receive BD initiator control block */
5722         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5723                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5724
5725         /* Receive data completion control block */
5726         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5727                tr32(RCVDCC_MODE));
5728
5729         /* Receive BD initiator control block */
5730         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5731                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5732
5733         /* Receive BD completion control block */
5734         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5735                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5736
5737         /* Receive list selector control block */
5738         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5739                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5740
5741         /* Mbuf cluster free block */
5742         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5743                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5744
5745         /* Host coalescing control block */
5746         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5747                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5748         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5749                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5750                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5751         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5752                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5753                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5754         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5755                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5756         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5757                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5758
5759         /* Memory arbiter control block */
5760         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5761                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5762
5763         /* Buffer manager control block */
5764         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5765                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5766         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5767                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5768         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5769                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5770                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5771                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5772
5773         /* Read DMA control block */
5774         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5775                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5776
5777         /* Write DMA control block */
5778         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5779                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5780
5781         /* DMA completion block */
5782         printk("DEBUG: DMAC_MODE[%08x]\n",
5783                tr32(DMAC_MODE));
5784
5785         /* GRC block */
5786         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5787                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5788         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5789                tr32(GRC_LOCAL_CTRL));
5790
5791         /* TG3_BDINFOs */
5792         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5793                tr32(RCVDBDI_JUMBO_BD + 0x0),
5794                tr32(RCVDBDI_JUMBO_BD + 0x4),
5795                tr32(RCVDBDI_JUMBO_BD + 0x8),
5796                tr32(RCVDBDI_JUMBO_BD + 0xc));
5797         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5798                tr32(RCVDBDI_STD_BD + 0x0),
5799                tr32(RCVDBDI_STD_BD + 0x4),
5800                tr32(RCVDBDI_STD_BD + 0x8),
5801                tr32(RCVDBDI_STD_BD + 0xc));
5802         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5803                tr32(RCVDBDI_MINI_BD + 0x0),
5804                tr32(RCVDBDI_MINI_BD + 0x4),
5805                tr32(RCVDBDI_MINI_BD + 0x8),
5806                tr32(RCVDBDI_MINI_BD + 0xc));
5807
5808         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5809         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5810         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5811         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5812         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5813                val32, val32_2, val32_3, val32_4);
5814
5815         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5816         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5817         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5818         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5819         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5820                val32, val32_2, val32_3, val32_4);
5821
5822         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5823         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5824         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5825         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5826         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5827         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5828                val32, val32_2, val32_3, val32_4, val32_5);
5829
5830         /* SW status block */
5831         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5832                tp->hw_status->status,
5833                tp->hw_status->status_tag,
5834                tp->hw_status->rx_jumbo_consumer,
5835                tp->hw_status->rx_consumer,
5836                tp->hw_status->rx_mini_consumer,
5837                tp->hw_status->idx[0].rx_producer,
5838                tp->hw_status->idx[0].tx_consumer);
5839
5840         /* SW statistics block */
5841         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5842                ((u32 *)tp->hw_stats)[0],
5843                ((u32 *)tp->hw_stats)[1],
5844                ((u32 *)tp->hw_stats)[2],
5845                ((u32 *)tp->hw_stats)[3]);
5846
5847         /* Mailboxes */
5848         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5849                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5850                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5851                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5852                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5853
5854         /* NIC side send descriptors. */
5855         for (i = 0; i < 6; i++) {
5856                 unsigned long txd;
5857
5858                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5859                         + (i * sizeof(struct tg3_tx_buffer_desc));
5860                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5861                        i,
5862                        readl(txd + 0x0), readl(txd + 0x4),
5863                        readl(txd + 0x8), readl(txd + 0xc));
5864         }
5865
5866         /* NIC side RX descriptors. */
5867         for (i = 0; i < 6; i++) {
5868                 unsigned long rxd;
5869
5870                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5871                         + (i * sizeof(struct tg3_rx_buffer_desc));
5872                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5873                        i,
5874                        readl(rxd + 0x0), readl(rxd + 0x4),
5875                        readl(rxd + 0x8), readl(rxd + 0xc));
5876                 rxd += (4 * sizeof(u32));
5877                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5878                        i,
5879                        readl(rxd + 0x0), readl(rxd + 0x4),
5880                        readl(rxd + 0x8), readl(rxd + 0xc));
5881         }
5882
5883         for (i = 0; i < 6; i++) {
5884                 unsigned long rxd;
5885
5886                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5887                         + (i * sizeof(struct tg3_rx_buffer_desc));
5888                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5889                        i,
5890                        readl(rxd + 0x0), readl(rxd + 0x4),
5891                        readl(rxd + 0x8), readl(rxd + 0xc));
5892                 rxd += (4 * sizeof(u32));
5893                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5894                        i,
5895                        readl(rxd + 0x0), readl(rxd + 0x4),
5896                        readl(rxd + 0x8), readl(rxd + 0xc));
5897         }
5898 }
5899 #endif
5900
5901 static struct net_device_stats *tg3_get_stats(struct net_device *);
5902 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5903
5904 static int tg3_close(struct net_device *dev)
5905 {
5906         struct tg3 *tp = netdev_priv(dev);
5907
5908         netif_stop_queue(dev);
5909
5910         del_timer_sync(&tp->timer);
5911
5912         spin_lock_irq(&tp->lock);
5913         spin_lock(&tp->tx_lock);
5914 #if 0
5915         tg3_dump_state(tp);
5916 #endif
5917
5918         tg3_disable_ints(tp);
5919
5920         tg3_halt(tp);
5921         tg3_free_rings(tp);
5922         tp->tg3_flags &=
5923                 ~(TG3_FLAG_INIT_COMPLETE |
5924                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5925         netif_carrier_off(tp->dev);
5926
5927         spin_unlock(&tp->tx_lock);
5928         spin_unlock_irq(&tp->lock);
5929
5930         free_irq(dev->irq, dev);
5931
5932         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5933                sizeof(tp->net_stats_prev));
5934         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5935                sizeof(tp->estats_prev));
5936
5937         tg3_free_consistent(tp);
5938
5939         return 0;
5940 }
5941
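/* Collapse a 64-bit hardware statistic into an unsigned long; 32-bit hosts
 * only see the low 32 bits.
 */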
5942 static inline unsigned long get_stat64(tg3_stat64_t *val)
5943 {
5944         unsigned long ret;
5945
5946 #if (BITS_PER_LONG == 32)
5947         ret = val->low;
5948 #else
5949         ret = ((u64)val->high << 32) | ((u64)val->low);
5950 #endif
5951         return ret;
5952 }
5953
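/* 5700/5701 copper chips keep the CRC error count in a PHY register
 * (read via registers 0x1e/0x14); other chips report it in the hardware
 * statistics block.
 */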
5954 static unsigned long calc_crc_errors(struct tg3 *tp)
5955 {
5956         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5957
5958         if (tp->phy_id != PHY_ID_SERDES &&
5959             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5960              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5961                 unsigned long flags;
5962                 u32 val;
5963
5964                 spin_lock_irqsave(&tp->lock, flags);
5965                 tg3_readphy(tp, 0x1e, &val);
5966                 tg3_writephy(tp, 0x1e, val | 0x8000);
5967                 tg3_readphy(tp, 0x14, &val);
5968                 spin_unlock_irqrestore(&tp->lock, flags);
5969
5970                 tp->phy_crc_errors += val;
5971
5972                 return tp->phy_crc_errors;
5973         }
5974
5975         return get_stat64(&hw_stats->rx_fcs_errors);
5976 }
5977
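/* Fold the live hardware counter on top of the snapshot saved at the last
 * device close.
 */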
5978 #define ESTAT_ADD(member) \
5979         estats->member =        old_estats->member + \
5980                                 get_stat64(&hw_stats->member)
5981
5982 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5983 {
5984         struct tg3_ethtool_stats *estats = &tp->estats;
5985         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5986         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5987
5988         if (!hw_stats)
5989                 return old_estats;
5990
5991         ESTAT_ADD(rx_octets);
5992         ESTAT_ADD(rx_fragments);
5993         ESTAT_ADD(rx_ucast_packets);
5994         ESTAT_ADD(rx_mcast_packets);
5995         ESTAT_ADD(rx_bcast_packets);
5996         ESTAT_ADD(rx_fcs_errors);
5997         ESTAT_ADD(rx_align_errors);
5998         ESTAT_ADD(rx_xon_pause_rcvd);
5999         ESTAT_ADD(rx_xoff_pause_rcvd);
6000         ESTAT_ADD(rx_mac_ctrl_rcvd);
6001         ESTAT_ADD(rx_xoff_entered);
6002         ESTAT_ADD(rx_frame_too_long_errors);
6003         ESTAT_ADD(rx_jabbers);
6004         ESTAT_ADD(rx_undersize_packets);
6005         ESTAT_ADD(rx_in_length_errors);
6006         ESTAT_ADD(rx_out_length_errors);
6007         ESTAT_ADD(rx_64_or_less_octet_packets);
6008         ESTAT_ADD(rx_65_to_127_octet_packets);
6009         ESTAT_ADD(rx_128_to_255_octet_packets);
6010         ESTAT_ADD(rx_256_to_511_octet_packets);
6011         ESTAT_ADD(rx_512_to_1023_octet_packets);
6012         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6013         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6014         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6015         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6016         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6017
6018         ESTAT_ADD(tx_octets);
6019         ESTAT_ADD(tx_collisions);
6020         ESTAT_ADD(tx_xon_sent);
6021         ESTAT_ADD(tx_xoff_sent);
6022         ESTAT_ADD(tx_flow_control);
6023         ESTAT_ADD(tx_mac_errors);
6024         ESTAT_ADD(tx_single_collisions);
6025         ESTAT_ADD(tx_mult_collisions);
6026         ESTAT_ADD(tx_deferred);
6027         ESTAT_ADD(tx_excessive_collisions);
6028         ESTAT_ADD(tx_late_collisions);
6029         ESTAT_ADD(tx_collide_2times);
6030         ESTAT_ADD(tx_collide_3times);
6031         ESTAT_ADD(tx_collide_4times);
6032         ESTAT_ADD(tx_collide_5times);
6033         ESTAT_ADD(tx_collide_6times);
6034         ESTAT_ADD(tx_collide_7times);
6035         ESTAT_ADD(tx_collide_8times);
6036         ESTAT_ADD(tx_collide_9times);
6037         ESTAT_ADD(tx_collide_10times);
6038         ESTAT_ADD(tx_collide_11times);
6039         ESTAT_ADD(tx_collide_12times);
6040         ESTAT_ADD(tx_collide_13times);
6041         ESTAT_ADD(tx_collide_14times);
6042         ESTAT_ADD(tx_collide_15times);
6043         ESTAT_ADD(tx_ucast_packets);
6044         ESTAT_ADD(tx_mcast_packets);
6045         ESTAT_ADD(tx_bcast_packets);
6046         ESTAT_ADD(tx_carrier_sense_errors);
6047         ESTAT_ADD(tx_discards);
6048         ESTAT_ADD(tx_errors);
6049
6050         ESTAT_ADD(dma_writeq_full);
6051         ESTAT_ADD(dma_write_prioq_full);
6052         ESTAT_ADD(rxbds_empty);
6053         ESTAT_ADD(rx_discards);
6054         ESTAT_ADD(rx_errors);
6055         ESTAT_ADD(rx_threshold_hit);
6056
6057         ESTAT_ADD(dma_readq_full);
6058         ESTAT_ADD(dma_read_prioq_full);
6059         ESTAT_ADD(tx_comp_queue_full);
6060
6061         ESTAT_ADD(ring_set_send_prod_index);
6062         ESTAT_ADD(ring_status_update);
6063         ESTAT_ADD(nic_irqs);
6064         ESTAT_ADD(nic_avoided_irqs);
6065         ESTAT_ADD(nic_tx_threshold_hit);
6066
6067         return estats;
6068 }
6069
6070 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6071 {
6072         struct tg3 *tp = netdev_priv(dev);
6073         struct net_device_stats *stats = &tp->net_stats;
6074         struct net_device_stats *old_stats = &tp->net_stats_prev;
6075         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6076
6077         if (!hw_stats)
6078                 return old_stats;
6079
6080         stats->rx_packets = old_stats->rx_packets +
6081                 get_stat64(&hw_stats->rx_ucast_packets) +
6082                 get_stat64(&hw_stats->rx_mcast_packets) +
6083                 get_stat64(&hw_stats->rx_bcast_packets);
6084
6085         stats->tx_packets = old_stats->tx_packets +
6086                 get_stat64(&hw_stats->tx_ucast_packets) +
6087                 get_stat64(&hw_stats->tx_mcast_packets) +
6088                 get_stat64(&hw_stats->tx_bcast_packets);
6089
6090         stats->rx_bytes = old_stats->rx_bytes +
6091                 get_stat64(&hw_stats->rx_octets);
6092         stats->tx_bytes = old_stats->tx_bytes +
6093                 get_stat64(&hw_stats->tx_octets);
6094
6095         stats->rx_errors = old_stats->rx_errors +
6096                 get_stat64(&hw_stats->rx_errors) +
6097                 get_stat64(&hw_stats->rx_discards);
6098         stats->tx_errors = old_stats->tx_errors +
6099                 get_stat64(&hw_stats->tx_errors) +
6100                 get_stat64(&hw_stats->tx_mac_errors) +
6101                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6102                 get_stat64(&hw_stats->tx_discards);
6103
6104         stats->multicast = old_stats->multicast +
6105                 get_stat64(&hw_stats->rx_mcast_packets);
6106         stats->collisions = old_stats->collisions +
6107                 get_stat64(&hw_stats->tx_collisions);
6108
6109         stats->rx_length_errors = old_stats->rx_length_errors +
6110                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6111                 get_stat64(&hw_stats->rx_undersize_packets);
6112
6113         stats->rx_over_errors = old_stats->rx_over_errors +
6114                 get_stat64(&hw_stats->rxbds_empty);
6115         stats->rx_frame_errors = old_stats->rx_frame_errors +
6116                 get_stat64(&hw_stats->rx_align_errors);
6117         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6118                 get_stat64(&hw_stats->tx_discards);
6119         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6120                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6121
6122         stats->rx_crc_errors = old_stats->rx_crc_errors +
6123                 calc_crc_errors(tp);
6124
6125         return stats;
6126 }
6127
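/* Bit-serial CRC-32 (reflected polynomial 0xedb88320), used to hash
 * multicast addresses into the MAC hash filter.
 */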
6128 static inline u32 calc_crc(unsigned char *buf, int len)
6129 {
6130         u32 reg;
6131         u32 tmp;
6132         int j, k;
6133
6134         reg = 0xffffffff;
6135
6136         for (j = 0; j < len; j++) {
6137                 reg ^= buf[j];
6138
6139                 for (k = 0; k < 8; k++) {
6140                         tmp = reg & 0x01;
6141
6142                         reg >>= 1;
6143
6144                         if (tmp) {
6145                                 reg ^= 0xedb88320;
6146                         }
6147                 }
6148         }
6149
6150         return ~reg;
6151 }
6152
6153 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6154 {
6155         /* accept or reject all multicast frames */
6156         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6157         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6158         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6159         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6160 }
6161
6162 static void __tg3_set_rx_mode(struct net_device *dev)
6163 {
6164         struct tg3 *tp = netdev_priv(dev);
6165         u32 rx_mode;
6166
6167         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6168                                   RX_MODE_KEEP_VLAN_TAG);
6169
6170         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6171          * flag clear.
6172          */
6173 #if TG3_VLAN_TAG_USED
6174         if (!tp->vlgrp &&
6175             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6176                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6177 #else
6178         /* By definition, VLAN is always disabled in this
6179          * case.
6180          */
6181         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6182                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6183 #endif
6184
6185         if (dev->flags & IFF_PROMISC) {
6186                 /* Promiscuous mode. */
6187                 rx_mode |= RX_MODE_PROMISC;
6188         } else if (dev->flags & IFF_ALLMULTI) {
6189                 /* Accept all multicast. */
6190                 tg3_set_multi (tp, 1);
6191         } else if (dev->mc_count < 1) {
6192                 /* Reject all multicast. */
6193                 tg3_set_multi (tp, 0);
6194         } else {
6195                 /* Accept one or more multicast(s). */
6196                 struct dev_mc_list *mclist;
6197                 unsigned int i;
6198                 u32 mc_filter[4] = { 0, };
6199                 u32 regidx;
6200                 u32 bit;
6201                 u32 crc;
6202
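                /* The low 7 bits of the inverted CRC select one of 128 hash
                 * filter bits: bits 6:5 pick the register, bits 4:0 the bit
                 * within it.
                 */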
6203                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6204                      i++, mclist = mclist->next) {
6205
6206                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6207                         bit = ~crc & 0x7f;
6208                         regidx = (bit & 0x60) >> 5;
6209                         bit &= 0x1f;
6210                         mc_filter[regidx] |= (1 << bit);
6211                 }
6212
6213                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6214                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6215                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6216                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6217         }
6218
6219         if (rx_mode != tp->rx_mode) {
6220                 tp->rx_mode = rx_mode;
6221                 tw32_f(MAC_RX_MODE, rx_mode);
6222                 udelay(10);
6223         }
6224 }
6225
6226 static void tg3_set_rx_mode(struct net_device *dev)
6227 {
6228         struct tg3 *tp = netdev_priv(dev);
6229
6230         spin_lock_irq(&tp->lock);
6231         __tg3_set_rx_mode(dev);
6232         spin_unlock_irq(&tp->lock);
6233 }
6234
6235 #define TG3_REGDUMP_LEN         (32 * 1024)
6236
6237 static int tg3_get_regs_len(struct net_device *dev)
6238 {
6239         return TG3_REGDUMP_LEN;
6240 }
6241
6242 static void tg3_get_regs(struct net_device *dev,
6243                 struct ethtool_regs *regs, void *_p)
6244 {
6245         u32 *p = _p;
6246         struct tg3 *tp = netdev_priv(dev);
6247         u8 *orig_p = _p;
6248         int i;
6249
6250         regs->version = 0;
6251
6252         memset(p, 0, TG3_REGDUMP_LEN);
6253
6254         spin_lock_irq(&tp->lock);
6255         spin_lock(&tp->tx_lock);
6256
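/* Copy register ranges into the dump buffer at the same offsets they occupy
 * in the chip's register space, so the 32 KB image mirrors the hardware
 * layout.
 */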
6257 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6258 #define GET_REG32_LOOP(base,len)                \
6259 do {    p = (u32 *)(orig_p + (base));           \
6260         for (i = 0; i < len; i += 4)            \
6261                 __GET_REG32((base) + i);        \
6262 } while (0)
6263 #define GET_REG32_1(reg)                        \
6264 do {    p = (u32 *)(orig_p + (reg));            \
6265         __GET_REG32((reg));                     \
6266 } while (0)
6267
6268         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6269         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6270         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6271         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6272         GET_REG32_1(SNDDATAC_MODE);
6273         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6274         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6275         GET_REG32_1(SNDBDC_MODE);
6276         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6277         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6278         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6279         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6280         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6281         GET_REG32_1(RCVDCC_MODE);
6282         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6283         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6284         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6285         GET_REG32_1(MBFREE_MODE);
6286         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6287         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6288         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6289         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6290         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6291         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6292         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6293         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6294         GET_REG32_LOOP(FTQ_RESET, 0x120);
6295         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6296         GET_REG32_1(DMAC_MODE);
6297         GET_REG32_LOOP(GRC_MODE, 0x4c);
6298         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6299                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6300
6301 #undef __GET_REG32
6302 #undef GET_REG32_LOOP
6303 #undef GET_REG32_1
6304
6305         spin_unlock(&tp->tx_lock);
6306         spin_unlock_irq(&tp->lock);
6307 }
6308
6309 static int tg3_get_eeprom_len(struct net_device *dev)
6310 {
6311         return EEPROM_CHIP_SIZE;
6312 }
6313
6314 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6315                                                  u32 offset, u32 *val);
6316 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6317 {
6318         struct tg3 *tp = netdev_priv(dev);
6319         int ret;
6320         u8  *pd;
6321         u32 i, offset, len, val, b_offset, b_count;
6322
6323         offset = eeprom->offset;
6324         len = eeprom->len;
6325         eeprom->len = 0;
6326
6327         ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6328         if (ret)
6329                 return ret;
6330         eeprom->magic = swab32(eeprom->magic);
6331
6332         if (offset & 3) {
6333                 /* adjustments to start on required 4 byte boundary */
6334                 b_offset = offset & 3;
6335                 b_count = 4 - b_offset;
6336                 if (b_count > len) {
6337                         /* i.e. offset=1 len=2 */
6338                         b_count = len;
6339                 }
6340                 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6341                 if (ret)
6342                         return ret;
6343                 memcpy(data, ((char*)&val) + b_offset, b_count);
6344                 len -= b_count;
6345                 offset += b_count;
6346                 eeprom->len += b_count;
6347         }
6348
6349         /* read bytes up to the last 4 byte boundary */
6350         pd = &data[eeprom->len];
6351         for (i = 0; i < (len - (len & 3)); i += 4) {
6352                 ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
6353                                 (u32*)(pd + i));
6354                 if (ret) {
6355                         eeprom->len += i;
6356                         return ret;
6357                 }
6358         }
6359         eeprom->len += i;
6360
6361         if (len & 3) {
6362                 /* read last bytes not ending on 4 byte boundary */
6363                 pd = &data[eeprom->len];
6364                 b_count = len & 3;
6365                 b_offset = offset + len - b_count;
6366                 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6367                 if (ret)
6368                         return ret;
6369                 memcpy(pd, ((char*)&val), b_count);
6370                 eeprom->len += b_count;
6371         }
6372         return 0;
6373 }
6374
6375 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6376 {
6377         struct tg3 *tp = netdev_priv(dev);
6378   
6379         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6380                                         tp->link_config.phy_is_low_power)
6381                 return -EAGAIN;
6382
6383         cmd->supported = (SUPPORTED_Autoneg);
6384
6385         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6386                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6387                                    SUPPORTED_1000baseT_Full);
6388
6389         if (tp->phy_id != PHY_ID_SERDES)
6390                 cmd->supported |= (SUPPORTED_100baseT_Half |
6391                                   SUPPORTED_100baseT_Full |
6392                                   SUPPORTED_10baseT_Half |
6393                                   SUPPORTED_10baseT_Full |
6394                                   SUPPORTED_MII);
6395         else
6396                 cmd->supported |= SUPPORTED_FIBRE;
6397   
6398         cmd->advertising = tp->link_config.advertising;
6399         cmd->speed = tp->link_config.active_speed;
6400         cmd->duplex = tp->link_config.active_duplex;
6401         cmd->port = 0;
6402         cmd->phy_address = PHY_ADDR;
6403         cmd->transceiver = 0;
6404         cmd->autoneg = tp->link_config.autoneg;
6405         cmd->maxtxpkt = 0;
6406         cmd->maxrxpkt = 0;
6407         return 0;
6408 }
6409   
6410 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6411 {
6412         struct tg3 *tp = netdev_priv(dev);
6413   
6414         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6415             tp->link_config.phy_is_low_power)
6416                 return -EAGAIN;
6417
6418         if (tp->phy_id == PHY_ID_SERDES) {
6419                 /* These are the only valid advertisement bits allowed.  */
6420                 if (cmd->autoneg == AUTONEG_ENABLE &&
6421                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6422                                           ADVERTISED_1000baseT_Full |
6423                                           ADVERTISED_Autoneg |
6424                                           ADVERTISED_FIBRE)))
6425                         return -EINVAL;
6426         }
6427
6428         spin_lock_irq(&tp->lock);
6429         spin_lock(&tp->tx_lock);
6430
6431         tp->link_config.autoneg = cmd->autoneg;
6432         if (cmd->autoneg == AUTONEG_ENABLE) {
6433                 tp->link_config.advertising = cmd->advertising;
6434                 tp->link_config.speed = SPEED_INVALID;
6435                 tp->link_config.duplex = DUPLEX_INVALID;
6436         } else {
6437                 tp->link_config.advertising = 0;
6438                 tp->link_config.speed = cmd->speed;
6439                 tp->link_config.duplex = cmd->duplex;
6440         }
6441   
6442         tg3_setup_phy(tp, 1);
6443         spin_unlock(&tp->tx_lock);
6444         spin_unlock_irq(&tp->lock);
6445   
6446         return 0;
6447 }
6448   
6449 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6450 {
6451         struct tg3 *tp = netdev_priv(dev);
6452   
6453         strcpy(info->driver, DRV_MODULE_NAME);
6454         strcpy(info->version, DRV_MODULE_VERSION);
6455         strcpy(info->bus_info, pci_name(tp->pdev));
6456 }
6457   
6458 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6459 {
6460         struct tg3 *tp = netdev_priv(dev);
6461   
6462         wol->supported = WAKE_MAGIC;
6463         wol->wolopts = 0;
6464         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6465                 wol->wolopts = WAKE_MAGIC;
6466         memset(&wol->sopass, 0, sizeof(wol->sopass));
6467 }
6468   
6469 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6470 {
6471         struct tg3 *tp = netdev_priv(dev);
6472   
6473         if (wol->wolopts & ~WAKE_MAGIC)
6474                 return -EINVAL;
6475         if ((wol->wolopts & WAKE_MAGIC) &&
6476             tp->phy_id == PHY_ID_SERDES &&
6477             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6478                 return -EINVAL;
6479   
6480         spin_lock_irq(&tp->lock);
6481         if (wol->wolopts & WAKE_MAGIC)
6482                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6483         else
6484                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6485         spin_unlock_irq(&tp->lock);
6486   
6487         return 0;
6488 }
6489   
6490 static u32 tg3_get_msglevel(struct net_device *dev)
6491 {
6492         struct tg3 *tp = netdev_priv(dev);
6493         return tp->msg_enable;
6494 }
6495   
6496 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6497 {
6498         struct tg3 *tp = netdev_priv(dev);
6499         tp->msg_enable = value;
6500 }
6501   
6502 #if TG3_TSO_SUPPORT != 0
6503 static int tg3_set_tso(struct net_device *dev, u32 value)
6504 {
6505         struct tg3 *tp = netdev_priv(dev);
6506
6507         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6508                 if (value)
6509                         return -EINVAL;
6510                 return 0;
6511         }
6512         return ethtool_op_set_tso(dev, value);
6513 }
6514 #endif
6515   
6516 static int tg3_nway_reset(struct net_device *dev)
6517 {
6518         struct tg3 *tp = netdev_priv(dev);
6519         u32 bmcr;
6520         int r;
6521   
6522         spin_lock_irq(&tp->lock);
6523         tg3_readphy(tp, MII_BMCR, &bmcr);
6524         tg3_readphy(tp, MII_BMCR, &bmcr);
6525         r = -EINVAL;
6526         if (bmcr & BMCR_ANENABLE) {
6527                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6528                 r = 0;
6529         }
6530         spin_unlock_irq(&tp->lock);
6531   
6532         return r;
6533 }
6534   
6535 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6536 {
6537         struct tg3 *tp = netdev_priv(dev);
6538   
6539         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6540         ering->rx_mini_max_pending = 0;
6541         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6542
6543         ering->rx_pending = tp->rx_pending;
6544         ering->rx_mini_pending = 0;
6545         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6546         ering->tx_pending = tp->tx_pending;
6547 }
6548   
6549 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6550 {
6551         struct tg3 *tp = netdev_priv(dev);
6552   
6553         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6554             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6555             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6556                 return -EINVAL;
6557   
6558         tg3_netif_stop(tp);
6559         spin_lock_irq(&tp->lock);
6560         spin_lock(&tp->tx_lock);
6561   
6562         tp->rx_pending = ering->rx_pending;
6563
6564         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6565             tp->rx_pending > 63)
6566                 tp->rx_pending = 63;
6567         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6568         tp->tx_pending = ering->tx_pending;
6569
6570         tg3_halt(tp);
6571         tg3_init_hw(tp);
6572         netif_wake_queue(tp->dev);
6573         spin_unlock(&tp->tx_lock);
6574         spin_unlock_irq(&tp->lock);
6575         tg3_netif_start(tp);
6576   
6577         return 0;
6578 }
6579   
6580 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6581 {
6582         struct tg3 *tp = netdev_priv(dev);
6583   
6584         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6585         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6586         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
6587 }
6588   
6589 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6590 {
6591         struct tg3 *tp = netdev_priv(dev);
6592   
6593         tg3_netif_stop(tp);
6594         spin_lock_irq(&tp->lock);
6595         spin_lock(&tp->tx_lock);
6596         if (epause->autoneg)
6597                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6598         else
6599                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6600         if (epause->rx_pause)
6601                 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6602         else
6603                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6604         if (epause->tx_pause)
6605                 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6606         else
6607                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6608         tg3_halt(tp);
6609         tg3_init_hw(tp);
6610         spin_unlock(&tp->tx_lock);
6611         spin_unlock_irq(&tp->lock);
6612         tg3_netif_start(tp);
6613   
6614         return 0;
6615 }
6616   
6617 static u32 tg3_get_rx_csum(struct net_device *dev)
6618 {
6619         struct tg3 *tp = netdev_priv(dev);
6620         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6621 }
6622   
6623 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6624 {
6625         struct tg3 *tp = netdev_priv(dev);
6626   
6627         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6628                 if (data != 0)
6629                         return -EINVAL;
6630                 return 0;
6631         }
6632   
6633         spin_lock_irq(&tp->lock);
6634         if (data)
6635                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6636         else
6637                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6638         spin_unlock_irq(&tp->lock);
6639   
6640         return 0;
6641 }
6642   
6643 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6644 {
6645         struct tg3 *tp = netdev_priv(dev);
6646   
6647         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6648                 if (data != 0)
6649                         return -EINVAL;
6650                 return 0;
6651         }
6652   
6653         if (data)
6654                 dev->features |= NETIF_F_IP_CSUM;
6655         else
6656                 dev->features &= ~NETIF_F_IP_CSUM;
6657
6658         return 0;
6659 }
6660
6661 static int tg3_get_stats_count (struct net_device *dev)
6662 {
6663         return TG3_NUM_STATS;
6664 }
6665
6666 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6667 {
6668         switch (stringset) {
6669         case ETH_SS_STATS:
6670                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6671                 break;
6672         default:
6673                 WARN_ON(1);     /* we need a WARN() */
6674                 break;
6675         }
6676 }
6677
6678 static void tg3_get_ethtool_stats (struct net_device *dev,
6679                                    struct ethtool_stats *estats, u64 *tmp_stats)
6680 {
6681         struct tg3 *tp = netdev_priv(dev);
6682         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6683 }
6684
6685 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6686 {
6687         struct mii_ioctl_data *data = if_mii(ifr);
6688         struct tg3 *tp = netdev_priv(dev);
6689         int err;
6690
6691         switch(cmd) {
6692         case SIOCGMIIPHY:
6693                 data->phy_id = PHY_ADDR;
6694
6695                 /* fallthru */
6696         case SIOCGMIIREG: {
6697                 u32 mii_regval;
6698
6699                 spin_lock_irq(&tp->lock);
6700                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6701                 spin_unlock_irq(&tp->lock);
6702
6703                 data->val_out = mii_regval;
6704
6705                 return err;
6706         }
6707
6708         case SIOCSMIIREG:
6709                 if (!capable(CAP_NET_ADMIN))
6710                         return -EPERM;
6711
6712                 spin_lock_irq(&tp->lock);
6713                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6714                 spin_unlock_irq(&tp->lock);
6715
6716                 return err;
6717
6718         default:
6719                 /* do nothing */
6720                 break;
6721         }
6722         return -EOPNOTSUPP;
6723 }
6724
6725 #if TG3_VLAN_TAG_USED
6726 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6727 {
6728         struct tg3 *tp = netdev_priv(dev);
6729
6730         spin_lock_irq(&tp->lock);
6731         spin_lock(&tp->tx_lock);
6732
6733         tp->vlgrp = grp;
6734
6735         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6736         __tg3_set_rx_mode(dev);
6737
6738         spin_unlock(&tp->tx_lock);
6739         spin_unlock_irq(&tp->lock);
6740 }
6741
6742 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6743 {
6744         struct tg3 *tp = netdev_priv(dev);
6745
6746         spin_lock_irq(&tp->lock);
6747         spin_lock(&tp->tx_lock);
6748         if (tp->vlgrp)
6749                 tp->vlgrp->vlan_devices[vid] = NULL;
6750         spin_unlock(&tp->tx_lock);
6751         spin_unlock_irq(&tp->lock);
6752 }
6753 #endif
6754
6755 static struct ethtool_ops tg3_ethtool_ops = {
6756         .get_settings           = tg3_get_settings,
6757         .set_settings           = tg3_set_settings,
6758         .get_drvinfo            = tg3_get_drvinfo,
6759         .get_regs_len           = tg3_get_regs_len,
6760         .get_regs               = tg3_get_regs,
6761         .get_wol                = tg3_get_wol,
6762         .set_wol                = tg3_set_wol,
6763         .get_msglevel           = tg3_get_msglevel,
6764         .set_msglevel           = tg3_set_msglevel,
6765         .nway_reset             = tg3_nway_reset,
6766         .get_link               = ethtool_op_get_link,
6767         .get_eeprom_len         = tg3_get_eeprom_len,
6768         .get_eeprom             = tg3_get_eeprom,
6769         .get_ringparam          = tg3_get_ringparam,
6770         .set_ringparam          = tg3_set_ringparam,
6771         .get_pauseparam         = tg3_get_pauseparam,
6772         .set_pauseparam         = tg3_set_pauseparam,
6773         .get_rx_csum            = tg3_get_rx_csum,
6774         .set_rx_csum            = tg3_set_rx_csum,
6775         .get_tx_csum            = ethtool_op_get_tx_csum,
6776         .set_tx_csum            = tg3_set_tx_csum,
6777         .get_sg                 = ethtool_op_get_sg,
6778         .set_sg                 = ethtool_op_set_sg,
6779 #if TG3_TSO_SUPPORT != 0
6780         .get_tso                = ethtool_op_get_tso,
6781         .set_tso                = tg3_set_tso,
6782 #endif
6783         .get_strings            = tg3_get_strings,
6784         .get_stats_count        = tg3_get_stats_count,
6785         .get_ethtool_stats      = tg3_get_ethtool_stats,
6786 };
6787
6788 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6789 static void __devinit tg3_nvram_init(struct tg3 *tp)
6790 {
6791         int j;
6792
6793         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6794                 return;
6795
6796         tw32_f(GRC_EEPROM_ADDR,
6797              (EEPROM_ADDR_FSM_RESET |
6798               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6799                EEPROM_ADDR_CLKPERD_SHIFT)));
6800
6801         /* XXX schedule_timeout() ... */
6802         for (j = 0; j < 100; j++)
6803                 udelay(10);
6804
6805         /* Enable seeprom accesses. */
6806         tw32_f(GRC_LOCAL_CTRL,
6807              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6808         udelay(100);
6809
6810         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6811             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6812                 u32 nvcfg1;
6813
6814                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6815                         u32 nvaccess = tr32(NVRAM_ACCESS);
6816
6817                         tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6818                 }
6819
6820                 nvcfg1 = tr32(NVRAM_CFG1);
6821
6822                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6823                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6824                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6825                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6826                 } else {
6827                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6828                         tw32(NVRAM_CFG1, nvcfg1);
6829                 }
6830
6831                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6832                         u32 nvaccess = tr32(NVRAM_ACCESS);
6833
6834                         tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6835                 }
6836         } else {
6837                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6838         }
6839 }
6840
6841 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6842                                                  u32 offset, u32 *val)
6843 {
6844         u32 tmp;
6845         int i;
6846
6847         if (offset > EEPROM_ADDR_ADDR_MASK ||
6848             (offset % 4) != 0)
6849                 return -EINVAL;
6850
6851         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6852                                         EEPROM_ADDR_DEVID_MASK |
6853                                         EEPROM_ADDR_READ);
6854         tw32(GRC_EEPROM_ADDR,
6855              tmp |
6856              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6857              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6858               EEPROM_ADDR_ADDR_MASK) |
6859              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6860
6861         for (i = 0; i < 10000; i++) {
6862                 tmp = tr32(GRC_EEPROM_ADDR);
6863
6864                 if (tmp & EEPROM_ADDR_COMPLETE)
6865                         break;
6866                 udelay(100);
6867         }
6868         if (!(tmp & EEPROM_ADDR_COMPLETE))
6869                 return -EBUSY;
6870
6871         *val = tr32(GRC_EEPROM_DATA);
6872         return 0;
6873 }
6874
6875 static int __devinit tg3_nvram_read(struct tg3 *tp,
6876                                     u32 offset, u32 *val)
6877 {
6878         int i;
6879
6880         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6881                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6882                 return -EINVAL;
6883         }
6884
6885         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6886                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6887
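        /* Buffered flash parts are addressed as a page number in the upper
         * bits plus a byte offset within the page, so translate the linear
         * offset into that layout before issuing the read.
         */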
6888         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6889                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6890                           NVRAM_BUFFERED_PAGE_POS) +
6891                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6892
6893         if (offset > NVRAM_ADDR_MSK)
6894                 return -EINVAL;
6895
6896         tg3_nvram_lock(tp);
6897
6898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6899                 u32 nvaccess = tr32(NVRAM_ACCESS);
6900
6901                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6902         }
6903
6904         tw32(NVRAM_ADDR, offset);
6905         tw32(NVRAM_CMD,
6906              NVRAM_CMD_RD | NVRAM_CMD_GO |
6907              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6908
6909         /* Wait for done bit to clear. */
6910         for (i = 0; i < 1000; i++) {
6911                 udelay(10);
6912                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6913                         udelay(10);
6914                         *val = swab32(tr32(NVRAM_RDDATA));
6915                         break;
6916                 }
6917         }
6918
6919         tg3_nvram_unlock(tp);
6920
6921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6922                 u32 nvaccess = tr32(NVRAM_ACCESS);
6923
6924                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6925         }
6926
6927         if (i >= 1000)
6928                 return -EBUSY;
6929
6930         return 0;
6931 }
6932
6933 struct subsys_tbl_ent {
6934         u16 subsys_vendor, subsys_devid;
6935         u32 phy_id;
6936 };
6937
6938 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6939         /* Broadcom boards. */
6940         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6941         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6942         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6943         { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
6944         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6945         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6946         { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
6947         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6948         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6949         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6950         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6951
6952         /* 3com boards. */
6953         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6954         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6955         { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
6956         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6957         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6958
6959         /* DELL boards. */
6960         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6961         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6962         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6963         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6964
6965         /* Compaq boards. */
6966         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6967         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6968         { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
6969         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6970         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6971
6972         /* IBM boards. */
6973         { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
6974 };
6975
6976 static int __devinit tg3_phy_probe(struct tg3 *tp)
6977 {
6978         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6979         u32 hw_phy_id, hw_phy_id_masked;
6980         u32 val;
6981         int i, eeprom_signature_found, err;
6982
6983         tp->phy_id = PHY_ID_INVALID;
6984         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6985                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6986                      tp->pdev->subsystem_vendor) &&
6987                     (subsys_id_to_phy_id[i].subsys_devid ==
6988                      tp->pdev->subsystem_device)) {
6989                         tp->phy_id = subsys_id_to_phy_id[i].phy_id;
6990                         break;
6991                 }
6992         }
6993
6994         eeprom_phy_id = PHY_ID_INVALID;
6995         eeprom_signature_found = 0;
6996         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6997         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6998                 u32 nic_cfg, led_cfg;
6999
7000                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7001                 tp->nic_sram_data_cfg = nic_cfg;
7002
7003                 eeprom_signature_found = 1;
7004
7005                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7006                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
7007                         eeprom_phy_id = PHY_ID_SERDES;
7008                 } else {
7009                         u32 nic_phy_id;
7010
7011                         tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7012                         if (nic_phy_id != 0) {
7013                                 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7014                                 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7015
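                                /* Pack the two ID words into the same PHY_ID
                                 * layout used for hw_phy_id below, so that
                                 * KNOWN_PHY_ID() can match either source.
                                 */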
7016                                 eeprom_phy_id  = (id1 >> 16) << 10;
7017                                 eeprom_phy_id |= (id2 & 0xfc00) << 16;
7018                                 eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7019                         }
7020                 }
7021
7022                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7023                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
7024                         led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7025                                     SHASTA_EXT_LED_MODE_MASK);
7026                 } else
7027                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7028
7029                 switch (led_cfg) {
7030                 default:
7031                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7032                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7033                         break;
7034
7035                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7036                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7037                         break;
7038
7039                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7040                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7041                         break;
7042
7043                 case SHASTA_EXT_LED_SHARED:
7044                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7045                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7046                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7047                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7048                                                  LED_CTRL_MODE_PHY_2);
7049                         break;
7050
7051                 case SHASTA_EXT_LED_MAC:
7052                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7053                         break;
7054
7055                 case SHASTA_EXT_LED_COMBO:
7056                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7057                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7058                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7059                                                  LED_CTRL_MODE_PHY_2);
7060                         break;
7061
7062                 }
7063
7064                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7065                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7066                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7067                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7068
7069                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7070                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7071                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7072                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7073                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7074
7075                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7076                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7077                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7078                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7079                 }
7080                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7081                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7082         }
7083
7084         /* Reading the PHY ID register can conflict with ASF
7085          * firmware access to the PHY hardware.
7086          */
7087         err = 0;
7088         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7089                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7090         } else {
7091                 /* Now read the physical PHY_ID from the chip and verify
7092                  * that it is sane.  If it doesn't look good, we fall back
7093                  * to the hard-coded, table-based PHY_ID and, failing
7094                  * that, to the value found in the eeprom area.
7095                  */
7096                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7097                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7098
7099                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7100                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7101                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7102
7103                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7104         }
7105
7106         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7107                 tp->phy_id = hw_phy_id;
7108         } else {
7109                 /* phy_id currently holds the value found in the
7110                  * subsys_id_to_phy_id[] table or PHY_ID_INVALID
7111                  * if a match was not found there.
7112                  */
7113                 if (tp->phy_id == PHY_ID_INVALID) {
7114                         if (!eeprom_signature_found ||
7115                             !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
7116                                 return -ENODEV;
7117                         tp->phy_id = eeprom_phy_id;
7118                 }
7119         }
7120
7121         if (tp->phy_id != PHY_ID_SERDES &&
7122             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7123                 u32 bmsr, adv_reg, tg3_ctrl;
7124
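                /* BMSR latches a link-down event until it is read, so read
                 * it twice and use the second value as the current state.
                 */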
7125                 tg3_readphy(tp, MII_BMSR, &bmsr);
7126                 tg3_readphy(tp, MII_BMSR, &bmsr);
7127
7128                 if (bmsr & BMSR_LSTATUS)
7129                         goto skip_phy_reset;
7130                     
7131                 err = tg3_phy_reset(tp);
7132                 if (err)
7133                         return err;
7134
7135                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7136                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7137                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7138                 tg3_ctrl = 0;
7139                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7140                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7141                                     MII_TG3_CTRL_ADV_1000_FULL);
7142                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7143                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7144                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7145                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7146                 }
7147
7148                 if (!tg3_copper_is_advertising_all(tp)) {
7149                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7150
7151                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7152                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7153
7154                         tg3_writephy(tp, MII_BMCR,
7155                                      BMCR_ANENABLE | BMCR_ANRESTART);
7156                 }
7157                 tg3_phy_set_wirespeed(tp);
7158
7159                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7160                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7161                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7162         }
7163
7164 skip_phy_reset:
7165         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7166                 err = tg3_init_5401phy_dsp(tp);
7167                 if (err)
7168                         return err;
7169         }
7170
7171         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7172                 err = tg3_init_5401phy_dsp(tp);
7173         }
7174
7175         if (!eeprom_signature_found)
7176                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7177
7178         if (tp->phy_id == PHY_ID_SERDES)
7179                 tp->link_config.advertising =
7180                         (ADVERTISED_1000baseT_Half |
7181                          ADVERTISED_1000baseT_Full |
7182                          ADVERTISED_Autoneg |
7183                          ADVERTISED_FIBRE);
7184         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7185                 tp->link_config.advertising &=
7186                         ~(ADVERTISED_1000baseT_Half |
7187                           ADVERTISED_1000baseT_Full);
7188
7189         return err;
7190 }
7191
7192 static void __devinit tg3_read_partno(struct tg3 *tp)
7193 {
7194         unsigned char vpd_data[256];
7195         int i;
7196
7197         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
7198                 /* Sun decided not to put the necessary bits in the
7199                  * NVRAM of their onboard tg3 parts :(
7200                  */
7201                 strcpy(tp->board_part_number, "Sun 5704");
7202                 return;
7203         }
7204
7205         for (i = 0; i < 256; i += 4) {
7206                 u32 tmp;
7207
7208                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7209                         goto out_not_found;
7210
7211                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7212                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7213                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7214                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7215         }
7216
7217         /* Now parse and find the part number. */
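        /* The buffer holds standard PCI VPD: 0x82 (identifier string) and
         * 0x91 (read/write data) resources are skipped, and 0x90 marks the
         * read-only resource that carries the 'PN' (part number) keyword.
         */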
7218         for (i = 0; i < 256; ) {
7219                 unsigned char val = vpd_data[i];
7220                 int block_end;
7221
7222                 if (val == 0x82 || val == 0x91) {
7223                         i = (i + 3 +
7224                              (vpd_data[i + 1] +
7225                               (vpd_data[i + 2] << 8)));
7226                         continue;
7227                 }
7228
7229                 if (val != 0x90)
7230                         goto out_not_found;
7231
7232                 block_end = (i + 3 +
7233                              (vpd_data[i + 1] +
7234                               (vpd_data[i + 2] << 8)));
7235                 i += 3;
7236                 while (i < (block_end - 2)) {
7237                         if (vpd_data[i + 0] == 'P' &&
7238                             vpd_data[i + 1] == 'N') {
7239                                 int partno_len = vpd_data[i + 2];
7240
7241                                 if (partno_len > 24)
7242                                         goto out_not_found;
7243
7244                                 memcpy(tp->board_part_number,
7245                                        &vpd_data[i + 3],
7246                                        partno_len);
7247
7248                                 /* Success. */
7249                                 return;
7250                         }
                        i += 3 + vpd_data[i + 2];
7251                 }
7252
7253                 /* Part number not found. */
7254                 goto out_not_found;
7255         }
7256
7257 out_not_found:
7258         strcpy(tp->board_part_number, "none");
7259 }
7260
7261 #ifdef CONFIG_SPARC64
7262 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
7263 {
7264         struct pci_dev *pdev = tp->pdev;
7265         struct pcidev_cookie *pcp = pdev->sysdata;
7266
7267         if (pcp != NULL) {
7268                 int node = pcp->prom_node;
7269                 u32 venid, devid;
7270                 int err;
7271
7272                 err = prom_getproperty(node, "subsystem-vendor-id",
7273                                        (char *) &venid, sizeof(venid));
7274                 if (err == 0 || err == -1)
7275                         return 0;
7276                 err = prom_getproperty(node, "subsystem-id",
7277                                        (char *) &devid, sizeof(devid));
7278                 if (err == 0 || err == -1)
7279                         return 0;
7280
7281                 if (venid == PCI_VENDOR_ID_SUN &&
7282                     devid == PCI_DEVICE_ID_TIGON3_5704)
7283                         return 1;
7284         }
7285         return 0;
7286 }
7287 #endif
7288
7289 static int __devinit tg3_get_invariants(struct tg3 *tp)
7290 {
7291         u32 misc_ctrl_reg;
7292         u32 cacheline_sz_reg;
7293         u32 pci_state_reg, grc_misc_cfg;
7294         u32 val;
7295         u16 pci_cmd;
7296         int err;
7297
7298 #ifdef CONFIG_SPARC64
7299         if (tg3_is_sun_5704(tp))
7300                 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
7301 #endif
7302
7303         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7304          * reordering to the mailbox registers done by the host
7305          * controller can cause major troubles.  We read back from
7306          * every mailbox register write to force the writes to be
7307          * posted to the chip in order.
7308          */
7309         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7310                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7311             pci_find_device(PCI_VENDOR_ID_INTEL,
7312                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7313             pci_find_device(PCI_VENDOR_ID_INTEL,
7314                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7315             pci_find_device(PCI_VENDOR_ID_INTEL,
7316                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7317             pci_find_device(PCI_VENDOR_ID_AMD,
7318                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7319                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7320
7321         /* Force memory write invalidate off.  If we leave it on,
7322          * then on 5700_BX chips we have to enable a workaround.
7323          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7324          * to match the cacheline size.  The Broadcom driver has this
7325          * workaround but turns MWI off all the time and so never uses
7326          * it.  This seems to suggest that the workaround is insufficient.
7327          */
7328         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7329         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7330         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7331
7332         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7333          * has the register indirect write enable bit set before
7334          * we try to access any of the MMIO registers.  It is also
7335          * critical that the PCI-X hw workaround situation is decided
7336          * before that as well.
7337          */
7338         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7339                               &misc_ctrl_reg);
7340
7341         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7342                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7343
7344         /* Initialize misc host control in PCI block. */
7345         tp->misc_host_ctrl |= (misc_ctrl_reg &
7346                                MISC_HOST_CTRL_CHIPREV);
7347         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7348                                tp->misc_host_ctrl);
7349
7350         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7351                               &cacheline_sz_reg);
7352
7353         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7354         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7355         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7356         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7357
7358         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7359                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7360
7361         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7362             tp->pci_lat_timer < 64) {
7363                 tp->pci_lat_timer = 64;
7364
7365                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7366                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7367                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7368                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7369
7370                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7371                                        cacheline_sz_reg);
7372         }
7373
7374         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7375                               &pci_state_reg);
7376
7377         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7378                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7379
7380                 /* If this is a 5700 BX chipset, and we are in PCI-X
7381                  * mode, enable register write workaround.
7382                  *
7383                  * The workaround is to use indirect register accesses
7384                  * for all chip writes not to mailbox registers.
7385                  */
7386                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7387                         u32 pm_reg;
7388                         u16 pci_cmd;
7389
7390                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7391
7392                         /* The chip can have its power management PCI config
7393                          * space registers clobbered due to this bug.
7394                          * So explicitly force the chip into D0 here.
7395                          */
7396                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7397                                               &pm_reg);
7398                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7399                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7400                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7401                                                pm_reg);
7402
7403                         /* Also, force SERR#/PERR# in PCI command. */
7404                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7405                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7406                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7407                 }
7408         }
7409
7410         /* Back to back register writes can cause problems on this chip,
7411          * the workaround is to read back all reg writes except those to
7412          * mailbox regs.  See tg3_write_indirect_reg32().
7413          *
7414          * PCI Express 5750_A0 rev chips need this workaround too.
7415          */
7416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7417             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7418              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7419                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7420
7421         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7422                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7423         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7424                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7425
7426         /* Chip-specific fixup from Broadcom driver */
7427         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7428             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7429                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7430                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7431         }
7432
7433         /* Force the chip into D0. */
7434         err = tg3_set_power_state(tp, 0);
7435         if (err) {
7436                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7437                        pci_name(tp->pdev));
7438                 return err;
7439         }
7440
7441         /* 5700 B0 chips do not support checksumming correctly due
7442          * to hardware bugs.
7443          */
7444         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7445                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7446
7447         /* Pseudo-header checksum is done by hardware logic and not
7448          * the offload processors, so make the chip do the pseudo-
7449          * header checksums on receive.  For transmit it is more
7450          * convenient to do the pseudo-header checksum in software
7451          * as Linux does that on transmit for us in all cases.
7452          */
7453         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7454         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7455
7456         /* Derive initial jumbo mode from MTU assigned in
7457          * ether_setup() via the alloc_etherdev() call
7458          */
7459         if (tp->dev->mtu > ETH_DATA_LEN)
7460                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7461
7462         /* Determine WakeOnLan speed to use. */
7463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7464             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7465             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7466             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7467                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7468         } else {
7469                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7470         }
7471
7472         /* A few boards don't want Ethernet@WireSpeed phy feature */
7473         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7474             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7475              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7476              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7477                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7478
7479         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7480             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7481                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7482         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7483                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7484
7485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7487                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7488
7489         /* Only 5701 and later support tagged irq status mode.
7490          * Also, 5788 chips cannot use tagged irq status.
7491          *
7492          * However, since we are using NAPI, avoid tagged irq status
7493          * because the interrupt condition is more difficult to
7494          * fully clear in that mode.
7495          */
7496         tp->coalesce_mode = 0;
7497
7498         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7499             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7500                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7501
7502         /* Initialize MAC MI mode, polling disabled. */
7503         tw32_f(MAC_MI_MODE, tp->mi_mode);
7504         udelay(80);
7505
7506         /* Initialize data/descriptor byte/word swapping. */
7507         val = tr32(GRC_MODE);
7508         val &= GRC_MODE_HOST_STACKUP;
7509         tw32(GRC_MODE, val | tp->grc_mode);
7510
7511         tg3_switch_clocks(tp);
7512
7513         /* Clear this out for sanity. */
7514         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7515
7516         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7517                               &pci_state_reg);
7518         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7519             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7520                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7521
7522                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7523                     chiprevid == CHIPREV_ID_5701_B0 ||
7524                     chiprevid == CHIPREV_ID_5701_B2 ||
7525                     chiprevid == CHIPREV_ID_5701_B5) {
7526                         unsigned long sram_base;
7527
7528                         /* Write some dummy words into the SRAM status block
7529                          * area and see if it reads back correctly.  If the return
7530                          * value is bad, force enable the PCIX workaround.
7531                          */
7532                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7533
7534                         writel(0x00000000, sram_base);
7535                         writel(0x00000000, sram_base + 4);
7536                         writel(0xffffffff, sram_base + 4);
7537                         if (readl(sram_base) != 0x00000000)
7538                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7539                 }
7540         }
7541
7542         udelay(50);
7543         tg3_nvram_init(tp);
7544
7545         /* Always use host TXDs; it performs better, in particular
7546          * with multi-frag packets.  The tests below are kept here
7547          * as documentation should we change this decision again
7548          * in the future.
7549          */
7550         tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7551
7552 #if 0
7553         /* Determine if TX descriptors will reside in
7554          * main memory or in the chip SRAM.
7555          */
7556         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
7557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7558             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7559                 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7560 #endif
7561
7562         grc_misc_cfg = tr32(GRC_MISC_CFG);
7563         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7564
7565         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7566             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7567                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7568                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7569         }
7570
7571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7572             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7573              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7574                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7575
7576         /* these are limited to 10/100 only */
7577         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7578              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7579             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7580              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7581              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7582               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7583               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7584             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7585              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
7586                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7587
7588         err = tg3_phy_probe(tp);
7589         if (err) {
7590                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7591                        pci_name(tp->pdev), err);
7592                 /* ... but do not return immediately ... */
7593         }
7594
7595         tg3_read_partno(tp);
7596
7597         if (tp->phy_id == PHY_ID_SERDES) {
7598                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7599         } else {
7600                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7601                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7602                 else
7603                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7604         }
7605
7606         /* 5700 {AX,BX} chips have a broken status block link
7607          * change bit implementation, so we must use the
7608          * status register in those cases.
7609          */
7610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7611                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7612         else
7613                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7614
7615         /* The led_ctrl is set during tg3_phy_probe; here we might
7616          * have to force the link status polling mechanism based
7617          * upon subsystem IDs.
7618          */
7619         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7620             tp->phy_id != PHY_ID_SERDES) {
7621                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7622                                   TG3_FLAG_USE_LINKCHG_REG);
7623         }
7624
7625         /* For all SERDES we poll the MAC status register. */
7626         if (tp->phy_id == PHY_ID_SERDES)
7627                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7628         else
7629                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7630
7631         /* 5700 BX chips need to have their TX producer index mailboxes
7632          * written twice to work around a bug.
7633          */
7634         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7635                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7636         else
7637                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7638
7639         /* 5700 chips can get confused if TX buffers straddle the
7640          * 4GB address boundary in some cases.
7641          */
7642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7643                 tp->dev->hard_start_xmit = tg3_start_xmit_4gbug;
7644         else
7645                 tp->dev->hard_start_xmit = tg3_start_xmit;
7646
7647         tp->rx_offset = 2;
7648         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7649             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7650                 tp->rx_offset = 0;
7651
7652         /* By default, disable wake-on-lan.  User can change this
7653          * using ETHTOOL_SWOL.
7654          */
7655         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7656
7657         return err;
7658 }
7659
7660 #ifdef CONFIG_SPARC64
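     /* On sparc64, prefer the OpenBoot PROM "local-mac-address" property
      * of this device's node over anything stored on the NIC itself.
      */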
7661 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7662 {
7663         struct net_device *dev = tp->dev;
7664         struct pci_dev *pdev = tp->pdev;
7665         struct pcidev_cookie *pcp = pdev->sysdata;
7666
7667         if (pcp != NULL) {
7668                 int node = pcp->prom_node;
7669
7670                 if (prom_getproplen(node, "local-mac-address") == 6) {
7671                         prom_getproperty(node, "local-mac-address",
7672                                          dev->dev_addr, 6);
7673                         return 0;
7674                 }
7675         }
7676         return -ENODEV;
7677 }
7678
7679 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7680 {
7681         struct net_device *dev = tp->dev;
7682
7683         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7684         return 0;
7685 }
7686 #endif
7687
7688 static int __devinit tg3_get_device_address(struct tg3 *tp)
7689 {
7690         struct net_device *dev = tp->dev;
7691         u32 hi, lo, mac_offset;
7692
7693 #ifdef CONFIG_SPARC64
7694         if (!tg3_get_macaddr_sparc(tp))
7695                 return 0;
7696 #endif
7697
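             /* Default NVRAM offset of the MAC address.  On a 5704, the
              * second MAC (DUAL_MAC_CTRL_ID set) apparently keeps its
              * address at offset 0xcc instead.
              */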
7698         mac_offset = 0x7c;
7699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7700             !(tp->tg3_flags & TG3_FLG2_SUN_5704)) {
7701                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7702                         mac_offset = 0xcc;
7703                 if (tg3_nvram_lock(tp))
7704                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7705                 else
7706                         tg3_nvram_unlock(tp);
7707         }
7708
7709         /* First try to get it from MAC address mailbox. */
7710         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
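             /* 0x484b is ASCII "HK"; presumably the signature the bootcode
              * leaves in the mailbox when a valid address is present.
              */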
7711         if ((hi >> 16) == 0x484b) {
7712                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7713                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7714
7715                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7716                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7717                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7718                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7719                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7720         }
7721         /* Next, try NVRAM. */
7722         else if (!(tp->tg3_flags & TG3_FLG2_SUN_5704) &&
7723                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7724                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7725                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7726                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7727                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7728                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7729                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7730                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7731         }
7732         /* Finally just fetch it out of the MAC control regs. */
7733         else {
7734                 hi = tr32(MAC_ADDR_0_HIGH);
7735                 lo = tr32(MAC_ADDR_0_LOW);
7736
7737                 dev->dev_addr[5] = lo & 0xff;
7738                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7739                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7740                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7741                 dev->dev_addr[1] = hi & 0xff;
7742                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7743         }
7744
7745         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7746 #ifdef CONFIG_SPARC64
7747                 if (!tg3_get_default_macaddr_sparc(tp))
7748                         return 0;
7749 #endif
7750                 return -EINVAL;
7751         }
7752         return 0;
7753 }
7754
7755 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7756 {
7757         struct tg3_internal_buffer_desc test_desc;
7758         u32 sram_dma_descs;
7759         int i, ret;
7760
7761         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7762
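             /* Clear out the DMA completion FIFOs and status registers and
              * disable the buffer manager so the test starts from a clean
              * state (a best-effort summary of the writes below).
              */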
7763         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7764         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7765         tw32(RDMAC_STATUS, 0);
7766         tw32(WDMAC_STATUS, 0);
7767
7768         tw32(BUFMGR_MODE, 0);
7769         tw32(FTQ_RESET, 0);
7770
7771         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7772         test_desc.addr_lo = buf_dma & 0xffffffff;
7773         test_desc.nic_mbuf = 0x00002100;
7774         test_desc.len = size;
7775
7776         /*
7777          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7778          * the *second* time the tg3 driver was loaded after an
7779          * initial scan.
7780          *
7781          * Broadcom tells me:
7782          *   ...the DMA engine is connected to the GRC block and a DMA
7783          *   reset may affect the GRC block in some unpredictable way...
7784          *   The behavior of resets to individual blocks has not been tested.
7785          *
7786          * Broadcom noted the GRC reset will also reset all sub-components.
7787          */
7788         if (to_device) {
7789                 test_desc.cqid_sqid = (13 << 8) | 2;
7790
7791                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7792                 udelay(40);
7793         } else {
7794                 test_desc.cqid_sqid = (16 << 8) | 7;
7795
7796                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7797                 udelay(40);
7798         }
7799         test_desc.flags = 0x00000005;
7800
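             /* Copy the test descriptor into NIC SRAM one 32-bit word at a
              * time through the PCI memory window, then restore the window
              * base address.
              */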
7801         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7802                 u32 val;
7803
7804                 val = *(((u32 *)&test_desc) + i);
7805                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7806                                        sram_dma_descs + (i * sizeof(u32)));
7807                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7808         }
7809         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7810
7811         if (to_device) {
7812                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7813         } else {
7814                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7815         }
7816
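             /* Poll the completion FIFO for up to ~4ms (40 * 100us) for the
              * descriptor we just queued; its appearance there means the
              * test DMA has finished.
              */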
7817         ret = -ENODEV;
7818         for (i = 0; i < 40; i++) {
7819                 u32 val;
7820
7821                 if (to_device)
7822                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7823                 else
7824                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7825                 if ((val & 0xffff) == sram_dma_descs) {
7826                         ret = 0;
7827                         break;
7828                 }
7829
7830                 udelay(100);
7831         }
7832
7833         return ret;
7834 }
7835
7836 #define TEST_BUFFER_SIZE        0x400
7837
7838 static int __devinit tg3_test_dma(struct tg3 *tp)
7839 {
7840         dma_addr_t buf_dma;
7841         u32 *buf;
7842         int ret;
7843
7844         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7845         if (!buf) {
7846                 ret = -ENOMEM;
7847                 goto out_nofree;
7848         }
7849
7850         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7851                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7852
7853 #ifndef CONFIG_X86
7854         {
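                     /* Match the DMA write boundary to the host cache line
                      * size.  PCI_CACHE_LINE_SIZE is in 32-bit words (hence
                      * the * 4 below); a value of 0 is treated as 1024 bytes.
                      */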
7855                 u8 byte;
7856                 int cacheline_size;
7857                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7858
7859                 if (byte == 0)
7860                         cacheline_size = 1024;
7861                 else
7862                         cacheline_size = (int) byte * 4;
7863
7864                 switch (cacheline_size) {
7865                 case 16:
7866                 case 32:
7867                 case 64:
7868                 case 128:
7869                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7870                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7871                                 tp->dma_rwctrl |=
7872                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7873                                 break;
7874                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7875                                 tp->dma_rwctrl &=
7876                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
7877                                 tp->dma_rwctrl |=
7878                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7879                                 break;
7880                         }
7881                         /* fallthrough */
7882                 case 256:
7883                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7884                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7885                                 tp->dma_rwctrl |=
7886                                         DMA_RWCTRL_WRITE_BNDRY_256;
7887                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7888                                 tp->dma_rwctrl |=
7889                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7890                 }
7891         }
7892 #endif
7893
7894         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7895                 tp->dma_rwctrl |= 0x001f0000;
7896         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7897                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7898                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7899                         tp->dma_rwctrl |= 0x003f0000;
7900                 else
7901                         tp->dma_rwctrl |= 0x003f000f;
7902         } else {
7903                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7904                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7905                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7906
7907                         if (ccval == 0x6 || ccval == 0x7)
7908                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7909
7910                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
7911                         tp->dma_rwctrl |= 0x009f0000;
7912                 } else {
7913                         tp->dma_rwctrl |= 0x001b000f;
7914                 }
7915         }
7916
7917         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7918             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7919                 tp->dma_rwctrl &= 0xfffffff0;
7920
7921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7923                 /* Remove this if it causes problems for some boards. */
7924                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7925
7926                 /* On 5700/5701 chips, we need to set this bit.
7927                  * Otherwise the chip will issue cacheline transactions
7928                  * to streamable DMA memory without all of the byte
7929                  * enables turned on.  This is an error on several
7930                  * RISC PCI controllers, in particular sparc64.
7931                  *
7932                  * On 5703/5704 chips, this bit has been reassigned
7933                  * a different meaning.  In particular, it is used
7934                  * on those chips to enable a PCI-X workaround.
7935                  */
7936                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7937         }
7938
7939         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7940
7941 #if 0
7942         /* Unneeded, already done by tg3_get_invariants.  */
7943         tg3_switch_clocks(tp);
7944 #endif
7945
7946         ret = 0;
7947         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7948             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7949                 goto out;
7950
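             /* DMA a known pattern to the chip and back, verifying it each
              * time.  If corruption is seen while write boundaries are
              * disabled, retry once with a 16-byte write boundary before
              * giving up.
              */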
7951         while (1) {
7952                 u32 *p = buf, i;
7953
7954                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7955                         p[i] = i;
7956
7957                 /* Send the buffer to the chip. */
7958                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7959                 if (ret) {
7960                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
7961                         break;
7962                 }
7963
7964 #if 0
7965                 /* validate data reached card RAM correctly. */
7966                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7967                         u32 val;
7968                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7969                         if (le32_to_cpu(val) != p[i]) {
7970                                 printk(KERN_ERR "tg3_test_dma() card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
7971                                 /* ret = -ENODEV here? */
7972                         }
7973                         p[i] = 0;
7974                 }
7975 #endif
7976                 /* Now read it back. */
7977                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7978                 if (ret) {
7979                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
7980
7981                         break;
7982                 }
7983
7984                 /* Verify it. */
7985                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7986                         if (p[i] == i)
7987                                 continue;
7988
7989                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7990                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7991                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7992                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7993                                 break;
7994                         } else {
7995                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
7996                                 ret = -ENODEV;
7997                                 goto out;
7998                         }
7999                 }
8000
8001                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8002                         /* Success. */
8003                         ret = 0;
8004                         break;
8005                 }
8006         }
8007
8008 out:
8009         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8010 out_nofree:
8011         return ret;
8012 }
8013
8014 static void __devinit tg3_init_link_config(struct tg3 *tp)
8015 {
8016         tp->link_config.advertising =
8017                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8018                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8019                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8020                  ADVERTISED_Autoneg | ADVERTISED_MII);
8021         tp->link_config.speed = SPEED_INVALID;
8022         tp->link_config.duplex = DUPLEX_INVALID;
8023         tp->link_config.autoneg = AUTONEG_ENABLE;
8024         netif_carrier_off(tp->dev);
8025         tp->link_config.active_speed = SPEED_INVALID;
8026         tp->link_config.active_duplex = DUPLEX_INVALID;
8027         tp->link_config.phy_is_low_power = 0;
8028         tp->link_config.orig_speed = SPEED_INVALID;
8029         tp->link_config.orig_duplex = DUPLEX_INVALID;
8030         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8031 }
8032
8033 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8034 {
8035         tp->bufmgr_config.mbuf_read_dma_low_water =
8036                 DEFAULT_MB_RDMA_LOW_WATER;
8037         tp->bufmgr_config.mbuf_mac_rx_low_water =
8038                 DEFAULT_MB_MACRX_LOW_WATER;
8039         tp->bufmgr_config.mbuf_high_water =
8040                 DEFAULT_MB_HIGH_WATER;
8041
8042         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8043                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8044         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8045                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8046         tp->bufmgr_config.mbuf_high_water_jumbo =
8047                 DEFAULT_MB_HIGH_WATER_JUMBO;
8048
8049         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8050         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8051 }
8052
8053 static char * __devinit tg3_phy_string(struct tg3 *tp)
8054 {
8055         switch (tp->phy_id & PHY_ID_MASK) {
8056         case PHY_ID_BCM5400:    return "5400";
8057         case PHY_ID_BCM5401:    return "5401";
8058         case PHY_ID_BCM5411:    return "5411";
8059         case PHY_ID_BCM5701:    return "5701";
8060         case PHY_ID_BCM5703:    return "5703";
8061         case PHY_ID_BCM5704:    return "5704";
8062         case PHY_ID_BCM5705:    return "5705";
8063         case PHY_ID_BCM5750:    return "5750";
8064         case PHY_ID_BCM8002:    return "8002";
8065         case PHY_ID_SERDES:     return "serdes";
8066         default:                return "unknown";
8067         }
8068 }
8069
8070 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8071 {
8072         struct pci_dev *peer;
8073         unsigned int func, devnr = tp->pdev->devfn & ~7;
8074
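             /* The 5704 is a dual-port chip; the peer is simply the other
              * PCI function at the same devfn (function bits masked off),
              * so scan all eight functions for a device other than our own.
              */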
8075         for (func = 0; func < 8; func++) {
8076                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8077                 if (peer && peer != tp->pdev)
8078                         break;
8079                 pci_dev_put(peer);
8080         }
8081         if (!peer || peer == tp->pdev)
8082                 BUG();
8083
8084         /*
8085          * We don't need to keep the refcount elevated; there's no way
8086          * to remove one half of this device without removing the other
8087          */
8088         pci_dev_put(peer);
8089
8090         return peer;
8091 }
8092
8093 static int __devinit tg3_init_one(struct pci_dev *pdev,
8094                                   const struct pci_device_id *ent)
8095 {
8096         static int tg3_version_printed = 0;
8097         unsigned long tg3reg_base, tg3reg_len;
8098         struct net_device *dev;
8099         struct tg3 *tp;
8100         int i, err, pci_using_dac, pm_cap;
8101
8102         if (tg3_version_printed++ == 0)
8103                 printk(KERN_INFO "%s", version);
8104
8105         err = pci_enable_device(pdev);
8106         if (err) {
8107                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8108                        "aborting.\n");
8109                 return err;
8110         }
8111
8112         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8113                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8114                        "base address, aborting.\n");
8115                 err = -ENODEV;
8116                 goto err_out_disable_pdev;
8117         }
8118
8119         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8120         if (err) {
8121                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8122                        "aborting.\n");
8123                 goto err_out_disable_pdev;
8124         }
8125
8126         pci_set_master(pdev);
8127
8128         /* Find power-management capability. */
8129         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8130         if (pm_cap == 0) {
8131                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8132                        "aborting.\n");
8133                 err = -EIO;
8134                 goto err_out_free_res;
8135         }
8136
8137         /* Configure DMA attributes. */
8138         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8139         if (!err) {
8140                 pci_using_dac = 1;
8141                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8142                 if (err < 0) {
8143                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8144                                "for consistent allocations\n");
8145                         goto err_out_free_res;
8146                 }
8147         } else {
8148                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8149                 if (err) {
8150                         printk(KERN_ERR PFX "No usable DMA configuration, "
8151                                "aborting.\n");
8152                         goto err_out_free_res;
8153                 }
8154                 pci_using_dac = 0;
8155         }
8156
8157         tg3reg_base = pci_resource_start(pdev, 0);
8158         tg3reg_len = pci_resource_len(pdev, 0);
8159
8160         dev = alloc_etherdev(sizeof(*tp));
8161         if (!dev) {
8162                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8163                 err = -ENOMEM;
8164                 goto err_out_free_res;
8165         }
8166
8167         SET_MODULE_OWNER(dev);
8168         SET_NETDEV_DEV(dev, &pdev->dev);
8169
8170         if (pci_using_dac)
8171                 dev->features |= NETIF_F_HIGHDMA;
8172 #if TG3_VLAN_TAG_USED
8173         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8174         dev->vlan_rx_register = tg3_vlan_rx_register;
8175         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8176 #endif
8177
8178         tp = netdev_priv(dev);
8179         tp->pdev = pdev;
8180         tp->dev = dev;
8181         tp->pm_cap = pm_cap;
8182         tp->mac_mode = TG3_DEF_MAC_MODE;
8183         tp->rx_mode = TG3_DEF_RX_MODE;
8184         tp->tx_mode = TG3_DEF_TX_MODE;
8185         tp->mi_mode = MAC_MI_MODE_BASE;
8186         if (tg3_debug > 0)
8187                 tp->msg_enable = tg3_debug;
8188         else
8189                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8190
8191         /* The word/byte swap controls here control register access byte
8192          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8193          * setting below.
8194          */
8195         tp->misc_host_ctrl =
8196                 MISC_HOST_CTRL_MASK_PCI_INT |
8197                 MISC_HOST_CTRL_WORD_SWAP |
8198                 MISC_HOST_CTRL_INDIR_ACCESS |
8199                 MISC_HOST_CTRL_PCISTATE_RW;
8200
8201         /* The NONFRM (non-frame) byte/word swap controls take effect
8202          * on descriptor entries, anything which isn't packet data.
8203          *
8204          * The StrongARM chips on the board (one for tx, one for rx)
8205          * are running in big-endian mode.
8206          */
8207         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8208                         GRC_MODE_WSWAP_NONFRM_DATA);
8209 #ifdef __BIG_ENDIAN
8210         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8211 #endif
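             /* Three locks: the main driver lock, a lock for the TX path,
              * and one serializing indirect register accesses performed
              * through PCI config space.
              */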
8212         spin_lock_init(&tp->lock);
8213         spin_lock_init(&tp->tx_lock);
8214         spin_lock_init(&tp->indirect_lock);
8215         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8216
8217         tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
8218         if (tp->regs == 0UL) {
8219                 printk(KERN_ERR PFX "Cannot map device registers, "
8220                        "aborting.\n");
8221                 err = -ENOMEM;
8222                 goto err_out_free_dev;
8223         }
8224
8225         tg3_init_link_config(tp);
8226
8227         tg3_init_bufmgr_config(tp);
8228
8229         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8230         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8231         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8232
8233         dev->open = tg3_open;
8234         dev->stop = tg3_close;
8235         dev->get_stats = tg3_get_stats;
8236         dev->set_multicast_list = tg3_set_rx_mode;
8237         dev->set_mac_address = tg3_set_mac_addr;
8238         dev->do_ioctl = tg3_ioctl;
8239         dev->tx_timeout = tg3_tx_timeout;
8240         dev->poll = tg3_poll;
8241         dev->ethtool_ops = &tg3_ethtool_ops;
8242         dev->weight = 64;
8243         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8244         dev->change_mtu = tg3_change_mtu;
8245         dev->irq = pdev->irq;
8246 #ifdef CONFIG_NET_POLL_CONTROLLER
8247         dev->poll_controller = tg3_poll_controller;
8248 #endif
8249
8250         err = tg3_get_invariants(tp);
8251         if (err) {
8252                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8253                        "aborting.\n");
8254                 goto err_out_iounmap;
8255         }
8256
8257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8258             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8259                 tp->bufmgr_config.mbuf_read_dma_low_water =
8260                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8261                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8262                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8263                 tp->bufmgr_config.mbuf_high_water =
8264                         DEFAULT_MB_HIGH_WATER_5705;
8265         }
8266
8267 #if TG3_TSO_SUPPORT != 0
8268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8269             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8270             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8271             ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8272              GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8273                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8274         } else {
8275                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8276         }
8277
8278         /* TSO is off by default; the user can enable it using ethtool.  */
8279 #if 0
8280         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8281                 dev->features |= NETIF_F_TSO;
8282 #endif
8283
8284 #endif
8285
8286         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8287             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8288             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8289                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8290                 tp->rx_pending = 63;
8291         }
8292
8293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8294                 tp->pdev_peer = tg3_find_5704_peer(tp);
8295
8296         err = tg3_get_device_address(tp);
8297         if (err) {
8298                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8299                        "aborting.\n");
8300                 goto err_out_iounmap;
8301         }
8302
8303         /*
8304          * Reset the chip in case the UNDI or EFI driver did not shut it
8305          * down cleanly; otherwise the DMA self test will enable WDMAC and
8306          * we'll see (spurious) pending DMA on the PCI bus at that point.
8307          */
8308         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8309             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8310                 pci_save_state(tp->pdev, tp->pci_cfg_state);
8311                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8312                 tg3_halt(tp);
8313         }
8314
8315         err = tg3_test_dma(tp);
8316         if (err) {
8317                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8318                 goto err_out_iounmap;
8319         }
8320
8321         /* Tigon3 can do IPv4 only... and some chips have buggy
8322          * checksumming.
8323          */
8324         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8325                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8326                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8327         } else
8328                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8329
8330         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8331                 dev->features &= ~NETIF_F_HIGHDMA;
8332
8333         err = register_netdev(dev);
8334         if (err) {
8335                 printk(KERN_ERR PFX "Cannot register net device, "
8336                        "aborting.\n");
8337                 goto err_out_iounmap;
8338         }
8339
8340         pci_set_drvdata(pdev, dev);
8341
8342         /* Now that we have fully set up the chip, save away a snapshot
8343          * of the PCI config space.  We need to restore this after
8344          * GRC_MISC_CFG core clock resets and some resume events.
8345          */
8346         pci_save_state(tp->pdev, tp->pci_cfg_state);
8347
8348         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8349                dev->name,
8350                tp->board_part_number,
8351                tp->pci_chip_rev_id,
8352                tg3_phy_string(tp),
8353                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8354                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8355                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8356                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8357                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8358                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8359
8360         for (i = 0; i < 6; i++)
8361                 printk("%2.2x%c", dev->dev_addr[i],
8362                        i == 5 ? '\n' : ':');
8363
8364         printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
8365                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8366                "TSOcap[%d] \n",
8367                dev->name,
8368                (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
8369                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8370                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8371                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8372                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8373                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8374                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8375                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8376
8377         return 0;
8378
8379 err_out_iounmap:
8380         iounmap((void *) tp->regs);
8381
8382 err_out_free_dev:
8383         free_netdev(dev);
8384
8385 err_out_free_res:
8386         pci_release_regions(pdev);
8387
8388 err_out_disable_pdev:
8389         pci_disable_device(pdev);
8390         pci_set_drvdata(pdev, NULL);
8391         return err;
8392 }
8393
8394 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8395 {
8396         struct net_device *dev = pci_get_drvdata(pdev);
8397
8398         if (dev) {
8399                 struct tg3 *tp = netdev_priv(dev);
8400
8401                 unregister_netdev(dev);
8402                 iounmap((void *)tp->regs);
8403                 free_netdev(dev);
8404                 pci_release_regions(pdev);
8405                 pci_disable_device(pdev);
8406                 pci_set_drvdata(pdev, NULL);
8407         }
8408 }
8409
8410 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8411 {
8412         struct net_device *dev = pci_get_drvdata(pdev);
8413         struct tg3 *tp = netdev_priv(dev);
8414         int err;
8415
8416         if (!netif_running(dev))
8417                 return 0;
8418
8419         tg3_netif_stop(tp);
8420
8421         del_timer_sync(&tp->timer);
8422
8423         spin_lock_irq(&tp->lock);
8424         spin_lock(&tp->tx_lock);
8425         tg3_disable_ints(tp);
8426         spin_unlock(&tp->tx_lock);
8427         spin_unlock_irq(&tp->lock);
8428
8429         netif_device_detach(dev);
8430
8431         spin_lock_irq(&tp->lock);
8432         spin_lock(&tp->tx_lock);
8433         tg3_halt(tp);
8434         spin_unlock(&tp->tx_lock);
8435         spin_unlock_irq(&tp->lock);
8436
8437         err = tg3_set_power_state(tp, state);
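             /* If the low-power state could not be entered, bring the
              * hardware back up and reattach the interface so the device
              * is left in a usable state.
              */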
8438         if (err) {
8439                 spin_lock_irq(&tp->lock);
8440                 spin_lock(&tp->tx_lock);
8441
8442                 tg3_init_hw(tp);
8443
8444                 tp->timer.expires = jiffies + tp->timer_offset;
8445                 add_timer(&tp->timer);
8446
8447                 spin_unlock(&tp->tx_lock);
8448                 spin_unlock_irq(&tp->lock);
8449
8450                 netif_device_attach(dev);
8451                 tg3_netif_start(tp);
8452         }
8453
8454         return err;
8455 }
8456
8457 static int tg3_resume(struct pci_dev *pdev)
8458 {
8459         struct net_device *dev = pci_get_drvdata(pdev);
8460         struct tg3 *tp = netdev_priv(dev);
8461         int err;
8462
8463         if (!netif_running(dev))
8464                 return 0;
8465
8466         pci_restore_state(tp->pdev, tp->pci_cfg_state);
8467
8468         err = tg3_set_power_state(tp, 0);
8469         if (err)
8470                 return err;
8471
8472         netif_device_attach(dev);
8473
8474         spin_lock_irq(&tp->lock);
8475         spin_lock(&tp->tx_lock);
8476
8477         tg3_init_hw(tp);
8478
8479         tp->timer.expires = jiffies + tp->timer_offset;
8480         add_timer(&tp->timer);
8481
8482         tg3_enable_ints(tp);
8483
8484         spin_unlock(&tp->tx_lock);
8485         spin_unlock_irq(&tp->lock);
8486
8487         tg3_netif_start(tp);
8488
8489         return 0;
8490 }
8491
8492 static struct pci_driver tg3_driver = {
8493         .name           = DRV_MODULE_NAME,
8494         .id_table       = tg3_pci_tbl,
8495         .probe          = tg3_init_one,
8496         .remove         = __devexit_p(tg3_remove_one),
8497         .suspend        = tg3_suspend,
8498         .resume         = tg3_resume
8499 };
8500
8501 static int __init tg3_init(void)
8502 {
8503         return pci_module_init(&tg3_driver);
8504 }
8505
8506 static void __exit tg3_cleanup(void)
8507 {
8508         pci_unregister_driver(&tg3_driver);
8509 }
8510
8511 module_init(tg3_init);
8512 module_exit(tg3_cleanup);