1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  *
8  * Firmware is:
9  *      Copyright (C) 2000-2003 Broadcom Corporation.
10  */
11
12 #include <linux/config.h>
13
14 #include <linux/module.h>
15
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/if_vlan.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/workqueue.h>
33
34 #include <net/checksum.h>
35
36 #include <asm/system.h>
37 #include <asm/io.h>
38 #include <asm/byteorder.h>
39 #include <asm/uaccess.h>
40
41 #ifdef CONFIG_SPARC64
42 #include <asm/idprom.h>
43 #include <asm/oplib.h>
44 #include <asm/pbm.h>
45 #endif
46
47 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48 #define TG3_VLAN_TAG_USED 1
49 #else
50 #define TG3_VLAN_TAG_USED 0
51 #endif
52
53 #ifdef NETIF_F_TSO
54 #define TG3_TSO_SUPPORT 1
55 #else
56 #define TG3_TSO_SUPPORT 0
57 #endif
58
59 #include "tg3.h"
60
61 #define DRV_MODULE_NAME         "tg3"
62 #define PFX DRV_MODULE_NAME     ": "
63 #define DRV_MODULE_VERSION      "3.10"
64 #define DRV_MODULE_RELDATE      "September 14, 2004"
65
66 #define TG3_DEF_MAC_MODE        0
67 #define TG3_DEF_RX_MODE         0
68 #define TG3_DEF_TX_MODE         0
69 #define TG3_DEF_MSG_ENABLE        \
70         (NETIF_MSG_DRV          | \
71          NETIF_MSG_PROBE        | \
72          NETIF_MSG_LINK         | \
73          NETIF_MSG_TIMER        | \
74          NETIF_MSG_IFDOWN       | \
75          NETIF_MSG_IFUP         | \
76          NETIF_MSG_RX_ERR       | \
77          NETIF_MSG_TX_ERR)
78
79 /* length of time before we decide the hardware is borked,
80  * and dev->tx_timeout() should be called to fix the problem
81  */
82 #define TG3_TX_TIMEOUT                  (5 * HZ)
83
84 /* hardware minimum and maximum for a single frame's data payload */
85 #define TG3_MIN_MTU                     60
86 #define TG3_MAX_MTU(tp) \
87         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
88           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
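/* Editorial note (not in the original source): because TG3_TX_RING_SIZE is a
 * power of two (512), advancing an index as "(idx + 1) & (TG3_TX_RING_SIZE - 1)"
 * -- see NEXT_TX() below -- is equivalent to "(idx + 1) % TG3_TX_RING_SIZE" but
 * compiles to a simple AND, which is exactly why these sizes are kept as
 * compile-time constants here.
 */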
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
107           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
108          512 : 1024)
109
110 #define TG3_TX_RING_SIZE                512
111 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
112
113 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_RING_SIZE)
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116                                  TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118                                    TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
120                                  TG3_TX_RING_SIZE)
121 #define TX_RING_GAP(TP) \
122         (TG3_TX_RING_SIZE - (TP)->tx_pending)
123 #define TX_BUFFS_AVAIL(TP)                                              \
124         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
125           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
126           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 static char version[] __devinitdata =
139         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
140
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_PARM(tg3_debug, "i");
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
148
149 static struct pci_device_id tg3_pci_tbl[] = {
150         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
151           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
152         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
153           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { 0, }
223 };
224
225 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
226
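/* Names reported for ETHTOOL_GSTATS.  TG3_NUM_STATS is derived from
 * sizeof(struct tg3_ethtool_stats), so this table presumably has to stay in
 * one-to-one correspondence with that structure's u64 members.
 */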
227 struct {
228         char string[ETH_GSTRING_LEN];
229 } ethtool_stats_keys[TG3_NUM_STATS] = {
230         { "rx_octets" },
231         { "rx_fragments" },
232         { "rx_ucast_packets" },
233         { "rx_mcast_packets" },
234         { "rx_bcast_packets" },
235         { "rx_fcs_errors" },
236         { "rx_align_errors" },
237         { "rx_xon_pause_rcvd" },
238         { "rx_xoff_pause_rcvd" },
239         { "rx_mac_ctrl_rcvd" },
240         { "rx_xoff_entered" },
241         { "rx_frame_too_long_errors" },
242         { "rx_jabbers" },
243         { "rx_undersize_packets" },
244         { "rx_in_length_errors" },
245         { "rx_out_length_errors" },
246         { "rx_64_or_less_octet_packets" },
247         { "rx_65_to_127_octet_packets" },
248         { "rx_128_to_255_octet_packets" },
249         { "rx_256_to_511_octet_packets" },
250         { "rx_512_to_1023_octet_packets" },
251         { "rx_1024_to_1522_octet_packets" },
252         { "rx_1523_to_2047_octet_packets" },
253         { "rx_2048_to_4095_octet_packets" },
254         { "rx_4096_to_8191_octet_packets" },
255         { "rx_8192_to_9022_octet_packets" },
256
257         { "tx_octets" },
258         { "tx_collisions" },
259
260         { "tx_xon_sent" },
261         { "tx_xoff_sent" },
262         { "tx_flow_control" },
263         { "tx_mac_errors" },
264         { "tx_single_collisions" },
265         { "tx_mult_collisions" },
266         { "tx_deferred" },
267         { "tx_excessive_collisions" },
268         { "tx_late_collisions" },
269         { "tx_collide_2times" },
270         { "tx_collide_3times" },
271         { "tx_collide_4times" },
272         { "tx_collide_5times" },
273         { "tx_collide_6times" },
274         { "tx_collide_7times" },
275         { "tx_collide_8times" },
276         { "tx_collide_9times" },
277         { "tx_collide_10times" },
278         { "tx_collide_11times" },
279         { "tx_collide_12times" },
280         { "tx_collide_13times" },
281         { "tx_collide_14times" },
282         { "tx_collide_15times" },
283         { "tx_ucast_packets" },
284         { "tx_mcast_packets" },
285         { "tx_bcast_packets" },
286         { "tx_carrier_sense_errors" },
287         { "tx_discards" },
288         { "tx_errors" },
289
290         { "dma_writeq_full" },
291         { "dma_write_prioq_full" },
292         { "rxbds_empty" },
293         { "rx_discards" },
294         { "rx_errors" },
295         { "rx_threshold_hit" },
296
297         { "dma_readq_full" },
298         { "dma_read_prioq_full" },
299         { "tx_comp_queue_full" },
300
301         { "ring_set_send_prod_index" },
302         { "ring_status_update" },
303         { "nic_irqs" },
304         { "nic_avoided_irqs" },
305         { "nic_tx_threshold_hit" }
306 };
307
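/* Register access helpers.  On chips with the PCI-X target hardware bug
 * (TG3_FLAG_PCIX_TARGET_HWBUG) registers are not written through MMIO;
 * instead the offset goes into the TG3PCI_REG_BASE_ADDR config-space window
 * and the value into TG3PCI_REG_DATA, all under indirect_lock.  Otherwise a
 * plain writel() is used, followed by a readl() flush on chips with the
 * 5701 register write bug.
 */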
308 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
309 {
310         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
311                 unsigned long flags;
312
313                 spin_lock_irqsave(&tp->indirect_lock, flags);
314                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
315                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
316                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
317         } else {
318                 writel(val, tp->regs + off);
319                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
320                         readl(tp->regs + off);
321         }
322 }
323
324 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
325 {
326         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
327                 unsigned long flags;
328
329                 spin_lock_irqsave(&tp->indirect_lock, flags);
330                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
331                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
332                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
333         } else {
334                 void __iomem *dest = tp->regs + off;
335                 writel(val, dest);
336                 readl(dest);    /* always flush PCI write */
337         }
338 }
339
340 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
341 {
342         void __iomem *mbox = tp->regs + off;
343         writel(val, mbox);
344         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
345                 readl(mbox);
346 }
347
348 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
349 {
350         void __iomem *mbox = tp->regs + off;
351         writel(val, mbox);
352         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
353                 writel(val, mbox);
354         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
355                 readl(mbox);
356 }
357
358 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
359 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
360 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
361
362 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
363 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
364 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
365 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
366 #define tr32(reg)               readl(tp->regs + (reg))
367 #define tr16(reg)               readw(tp->regs + (reg))
368 #define tr8(reg)                readb(tp->regs + (reg))
369
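/* NIC on-board memory is reached through a similar config-space window:
 * tg3_write_mem()/tg3_read_mem() point TG3PCI_MEM_WIN_BASE_ADDR at the
 * target offset, move the data through TG3PCI_MEM_WIN_DATA, and always park
 * the window base back at zero before releasing indirect_lock.
 */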
370 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
371 {
372         unsigned long flags;
373
374         spin_lock_irqsave(&tp->indirect_lock, flags);
375         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
376         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
377
378         /* Always leave this as zero. */
379         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
380         spin_unlock_irqrestore(&tp->indirect_lock, flags);
381 }
382
383 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
384 {
385         unsigned long flags;
386
387         spin_lock_irqsave(&tp->indirect_lock, flags);
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
389         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
390
391         /* Always leave this as zero. */
392         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
393         spin_unlock_irqrestore(&tp->indirect_lock, flags);
394 }
395
396 static void tg3_disable_ints(struct tg3 *tp)
397 {
398         tw32(TG3PCI_MISC_HOST_CTRL,
399              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
400         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
401         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
402 }
403
404 static inline void tg3_cond_int(struct tg3 *tp)
405 {
406         if (tp->hw_status->status & SD_STATUS_UPDATED)
407                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
408 }
409
410 static void tg3_enable_ints(struct tg3 *tp)
411 {
412         tw32(TG3PCI_MISC_HOST_CTRL,
413              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
414         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
415         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
416
417         tg3_cond_int(tp);
418 }
419
420 static inline void tg3_netif_stop(struct tg3 *tp)
421 {
422         netif_poll_disable(tp->dev);
423         netif_tx_disable(tp->dev);
424 }
425
426 static inline void tg3_netif_start(struct tg3 *tp)
427 {
428         netif_wake_queue(tp->dev);
429         /* NOTE: unconditional netif_wake_queue is only appropriate
430          * so long as all callers are assured to have free tx slots
431          * (such as after tg3_init_hw)
432          */
433         netif_poll_enable(tp->dev);
434         tg3_cond_int(tp);
435 }
436
437 static void tg3_switch_clocks(struct tg3 *tp)
438 {
439         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
440         u32 orig_clock_ctrl;
441
442         orig_clock_ctrl = clock_ctrl;
443         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
444                        CLOCK_CTRL_CLKRUN_OENABLE |
445                        0x1f);
446         tp->pci_clock_ctrl = clock_ctrl;
447
448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
450                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
451                         tw32_f(TG3PCI_CLOCK_CTRL,
452                                clock_ctrl | CLOCK_CTRL_625_CORE);
453                         udelay(40);
454                 }
455         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
456                 tw32_f(TG3PCI_CLOCK_CTRL,
457                      clock_ctrl |
458                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
459                 udelay(40);
460                 tw32_f(TG3PCI_CLOCK_CTRL,
461                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
462                 udelay(40);
463         }
464         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
465         udelay(40);
466 }
467
468 #define PHY_BUSY_LOOPS  5000
469
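/* MII access works the same way in both directions: auto-polling is
 * temporarily turned off if enabled, a command frame is posted to
 * MAC_MI_COM, and MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS iterations,
 * 10 usec apart) before the original MI mode is restored.
 */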
470 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
471 {
472         u32 frame_val;
473         int loops, ret;
474
475         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
476                 tw32_f(MAC_MI_MODE,
477                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
478                 udelay(80);
479         }
480
481         *val = 0xffffffff;
482
483         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
484                       MI_COM_PHY_ADDR_MASK);
485         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
486                       MI_COM_REG_ADDR_MASK);
487         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
488         
489         tw32_f(MAC_MI_COM, frame_val);
490
491         loops = PHY_BUSY_LOOPS;
492         while (loops-- > 0) {
493                 udelay(10);
494                 frame_val = tr32(MAC_MI_COM);
495
496                 if ((frame_val & MI_COM_BUSY) == 0) {
497                         udelay(5);
498                         frame_val = tr32(MAC_MI_COM);
499                         break;
500                 }
501         }
502
503         ret = -EBUSY;
504         if (loops > 0) {
505                 *val = frame_val & MI_COM_DATA_MASK;
506                 ret = 0;
507         }
508
509         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
510                 tw32_f(MAC_MI_MODE, tp->mi_mode);
511                 udelay(80);
512         }
513
514         return ret;
515 }
516
517 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
518 {
519         u32 frame_val;
520         int loops, ret;
521
522         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
523                 tw32_f(MAC_MI_MODE,
524                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
525                 udelay(80);
526         }
527
528         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
529                       MI_COM_PHY_ADDR_MASK);
530         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
531                       MI_COM_REG_ADDR_MASK);
532         frame_val |= (val & MI_COM_DATA_MASK);
533         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
534         
535         tw32_f(MAC_MI_COM, frame_val);
536
537         loops = PHY_BUSY_LOOPS;
538         while (loops-- > 0) {
539                 udelay(10);
540                 frame_val = tr32(MAC_MI_COM);
541                 if ((frame_val & MI_COM_BUSY) == 0) {
542                         udelay(5);
543                         frame_val = tr32(MAC_MI_COM);
544                         break;
545                 }
546         }
547
548         ret = -EBUSY;
549         if (loops > 0)
550                 ret = 0;
551
552         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
553                 tw32_f(MAC_MI_MODE, tp->mi_mode);
554                 udelay(80);
555         }
556
557         return ret;
558 }
559
560 static void tg3_phy_set_wirespeed(struct tg3 *tp)
561 {
562         u32 val;
563
564         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
565                 return;
566
567         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
568         tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
569         tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
570 }
571
572 static int tg3_bmcr_reset(struct tg3 *tp)
573 {
574         u32 phy_control;
575         int limit, err;
576
577         /* OK, reset it, and poll the BMCR_RESET bit until it
578          * clears or we time out.
579          */
580         phy_control = BMCR_RESET;
581         err = tg3_writephy(tp, MII_BMCR, phy_control);
582         if (err != 0)
583                 return -EBUSY;
584
585         limit = 5000;
586         while (limit--) {
587                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
588                 if (err != 0)
589                         return -EBUSY;
590
591                 if ((phy_control & BMCR_RESET) == 0) {
592                         udelay(40);
593                         break;
594                 }
595                 udelay(10);
596         }
597         if (limit <= 0)
598                 return -EBUSY;
599
600         return 0;
601 }
602
603 static int tg3_wait_macro_done(struct tg3 *tp)
604 {
605         int limit = 100;
606
607         while (limit--) {
608                 u32 tmp32;
609
610                 tg3_readphy(tp, 0x16, &tmp32);
611                 if ((tmp32 & 0x1000) == 0)
612                         break;
613         }
614         if (limit <= 0)
615                 return -EBUSY;
616
617         return 0;
618 }
619
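/* tg3_phy_write_and_check_testpat() pushes known test patterns into the DSP
 * of each of the four PHY channels and reads them back.  A macro-done
 * timeout sets *resetp so the caller repeats the BMCR reset before
 * retrying; a read-back mismatch just returns -EBUSY.
 */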
620 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
621 {
622         static const u32 test_pat[4][6] = {
623         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
624         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
625         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
626         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
627         };
628         int chan;
629
630         for (chan = 0; chan < 4; chan++) {
631                 int i;
632
633                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
634                              (chan * 0x2000) | 0x0200);
635                 tg3_writephy(tp, 0x16, 0x0002);
636
637                 for (i = 0; i < 6; i++)
638                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
639                                      test_pat[chan][i]);
640
641                 tg3_writephy(tp, 0x16, 0x0202);
642                 if (tg3_wait_macro_done(tp)) {
643                         *resetp = 1;
644                         return -EBUSY;
645                 }
646
647                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
648                              (chan * 0x2000) | 0x0200);
649                 tg3_writephy(tp, 0x16, 0x0082);
650                 if (tg3_wait_macro_done(tp)) {
651                         *resetp = 1;
652                         return -EBUSY;
653                 }
654
655                 tg3_writephy(tp, 0x16, 0x0802);
656                 if (tg3_wait_macro_done(tp)) {
657                         *resetp = 1;
658                         return -EBUSY;
659                 }
660
661                 for (i = 0; i < 6; i += 2) {
662                         u32 low, high;
663
664                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
665                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
666                         if (tg3_wait_macro_done(tp)) {
667                                 *resetp = 1;
668                                 return -EBUSY;
669                         }
670                         low &= 0x7fff;
671                         high &= 0x000f;
672                         if (low != test_pat[chan][i] ||
673                             high != test_pat[chan][i+1]) {
674                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
675                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
676                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
677
678                                 return -EBUSY;
679                         }
680                 }
681         }
682
683         return 0;
684 }
685
686 static int tg3_phy_reset_chanpat(struct tg3 *tp)
687 {
688         int chan;
689
690         for (chan = 0; chan < 4; chan++) {
691                 int i;
692
693                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
694                              (chan * 0x2000) | 0x0200);
695                 tg3_writephy(tp, 0x16, 0x0002);
696                 for (i = 0; i < 6; i++)
697                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
698                 tg3_writephy(tp, 0x16, 0x0202);
699                 if (tg3_wait_macro_done(tp))
700                         return -EBUSY;
701         }
702
703         return 0;
704 }
705
706 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
707 {
708         u32 reg32, phy9_orig;
709         int retries, do_phy_reset, err;
710
711         retries = 10;
712         do_phy_reset = 1;
713         do {
714                 if (do_phy_reset) {
715                         err = tg3_bmcr_reset(tp);
716                         if (err)
717                                 return err;
718                         do_phy_reset = 0;
719                 }
720
721                 /* Disable transmitter and interrupt.  */
722                 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
723                 reg32 |= 0x3000;
724                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
725
726                 /* Set full-duplex, 1000 mbps.  */
727                 tg3_writephy(tp, MII_BMCR,
728                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
729
730                 /* Set to master mode.  */
731                 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
732                 tg3_writephy(tp, MII_TG3_CTRL,
733                              (MII_TG3_CTRL_AS_MASTER |
734                               MII_TG3_CTRL_ENABLE_AS_MASTER));
735
736                 /* Enable SM_DSP_CLOCK and 6dB.  */
737                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
738
739                 /* Block the PHY control access.  */
740                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
741                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
742
743                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
744                 if (!err)
745                         break;
746         } while (--retries);
747
748         err = tg3_phy_reset_chanpat(tp);
749         if (err)
750                 return err;
751
752         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
753         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
754
755         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
756         tg3_writephy(tp, 0x16, 0x0000);
757
758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
759             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
760                 /* Set Extended packet length bit for jumbo frames */
761                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
762         }
763         else {
764                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
765         }
766
767         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
768
769         tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
770         reg32 &= ~0x3000;
771         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
772
773         return err;
774 }
775
776 /* This will reset the tigon3 PHY.  The decision to force a reset is
777  * made by the caller (see the force_reset handling in tg3_setup_copper_phy()).
778  */
779 static int tg3_phy_reset(struct tg3 *tp)
780 {
781         u32 phy_status;
782         int err;
783
784         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
785         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
786         if (err != 0)
787                 return -EBUSY;
788
789         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
792                 err = tg3_phy_reset_5703_4_5(tp);
793                 if (err)
794                         return err;
795                 goto out;
796         }
797
798         err = tg3_bmcr_reset(tp);
799         if (err)
800                 return err;
801
802 out:
803         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
804                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
805                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
806                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
807                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
808                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
809                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
810         }
811         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
812                 tg3_writephy(tp, 0x1c, 0x8d68);
813                 tg3_writephy(tp, 0x1c, 0x8d68);
814         }
815         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
816                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
817                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
818                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
819                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
820                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
821                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
822                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
823                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
824         }
825         /* Set Extended packet length bit (bit 14) on all chips that */
826         /* support jumbo frames */
827         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
828                 /* Cannot do read-modify-write on 5401 */
829                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
830         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
831                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
832                 u32 phy_reg;
833
834                 /* Set bit 14 with read-modify-write to preserve other bits */
835                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
836                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
837                 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
838         }
839         tg3_phy_set_wirespeed(tp);
840         return 0;
841 }
842
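/* tg3_frob_aux_power() toggles the GRC local-control GPIOs that switch the
 * board's auxiliary power.  On the dual-port 5704 the setting is shared with
 * the peer function (pdev_peer), so the GPIOs are left untouched when the
 * peer has already completed initialization.
 */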
843 static void tg3_frob_aux_power(struct tg3 *tp)
844 {
845         struct tg3 *tp_peer = tp;
846
847         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
848                 return;
849
850         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
851                 tp_peer = pci_get_drvdata(tp->pdev_peer);
852                 if (!tp_peer)
853                         BUG();
854         }
855
856
857         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
858             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
859                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
860                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
861                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
862                              (GRC_LCLCTRL_GPIO_OE0 |
863                               GRC_LCLCTRL_GPIO_OE1 |
864                               GRC_LCLCTRL_GPIO_OE2 |
865                               GRC_LCLCTRL_GPIO_OUTPUT0 |
866                               GRC_LCLCTRL_GPIO_OUTPUT1));
867                         udelay(100);
868                 } else {
869                         if (tp_peer != tp &&
870                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
871                                 return;
872
873                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
874                              (GRC_LCLCTRL_GPIO_OE0 |
875                               GRC_LCLCTRL_GPIO_OE1 |
876                               GRC_LCLCTRL_GPIO_OE2 |
877                               GRC_LCLCTRL_GPIO_OUTPUT1 |
878                               GRC_LCLCTRL_GPIO_OUTPUT2));
879                         udelay(100);
880
881                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
882                              (GRC_LCLCTRL_GPIO_OE0 |
883                               GRC_LCLCTRL_GPIO_OE1 |
884                               GRC_LCLCTRL_GPIO_OE2 |
885                               GRC_LCLCTRL_GPIO_OUTPUT0 |
886                               GRC_LCLCTRL_GPIO_OUTPUT1 |
887                               GRC_LCLCTRL_GPIO_OUTPUT2));
888                         udelay(100);
889
890                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
891                              (GRC_LCLCTRL_GPIO_OE0 |
892                               GRC_LCLCTRL_GPIO_OE1 |
893                               GRC_LCLCTRL_GPIO_OE2 |
894                               GRC_LCLCTRL_GPIO_OUTPUT0 |
895                               GRC_LCLCTRL_GPIO_OUTPUT1));
896                         udelay(100);
897                 }
898         } else {
899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
900                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
901                         if (tp_peer != tp &&
902                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
903                                 return;
904
905                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
906                              (GRC_LCLCTRL_GPIO_OE1 |
907                               GRC_LCLCTRL_GPIO_OUTPUT1));
908                         udelay(100);
909
910                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
911                              (GRC_LCLCTRL_GPIO_OE1));
912                         udelay(100);
913
914                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
915                              (GRC_LCLCTRL_GPIO_OE1 |
916                               GRC_LCLCTRL_GPIO_OUTPUT1));
917                         udelay(100);
918                 }
919         }
920 }
921
922 static int tg3_setup_phy(struct tg3 *, int);
923
924 #define RESET_KIND_SHUTDOWN     0
925 #define RESET_KIND_INIT         1
926 #define RESET_KIND_SUSPEND      2
927
928 static void tg3_write_sig_post_reset(struct tg3 *, int);
929
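/* tg3_set_power_state() maps 'state' 0..3 onto the PCI PM D-states: it keeps
 * register access working, masks chip interrupts, restricts a copper PHY to
 * 10/100 operation for low power, configures wake-on-LAN and the
 * clock-control bits as needed, and finally writes the new power state and
 * posts the RESET_KIND_SHUTDOWN signature.
 */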
930 static int tg3_set_power_state(struct tg3 *tp, int state)
931 {
932         u32 misc_host_ctrl;
933         u16 power_control, power_caps;
934         int pm = tp->pm_cap;
935
936         /* Make sure register accesses (indirect or otherwise)
937          * will function correctly.
938          */
939         pci_write_config_dword(tp->pdev,
940                                TG3PCI_MISC_HOST_CTRL,
941                                tp->misc_host_ctrl);
942
943         pci_read_config_word(tp->pdev,
944                              pm + PCI_PM_CTRL,
945                              &power_control);
946         power_control |= PCI_PM_CTRL_PME_STATUS;
947         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
948         switch (state) {
949         case 0:
950                 power_control |= 0;
951                 pci_write_config_word(tp->pdev,
952                                       pm + PCI_PM_CTRL,
953                                       power_control);
954                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
955                 udelay(100);
956
957                 return 0;
958
959         case 1:
960                 power_control |= 1;
961                 break;
962
963         case 2:
964                 power_control |= 2;
965                 break;
966
967         case 3:
968                 power_control |= 3;
969                 break;
970
971         default:
972                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
973                        "requested.\n",
974                        tp->dev->name, state);
975                 return -EINVAL;
976         };
977
978         power_control |= PCI_PM_CTRL_PME_ENABLE;
979
980         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
981         tw32(TG3PCI_MISC_HOST_CTRL,
982              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
983
984         if (tp->link_config.phy_is_low_power == 0) {
985                 tp->link_config.phy_is_low_power = 1;
986                 tp->link_config.orig_speed = tp->link_config.speed;
987                 tp->link_config.orig_duplex = tp->link_config.duplex;
988                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
989         }
990
991         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
992                 tp->link_config.speed = SPEED_10;
993                 tp->link_config.duplex = DUPLEX_HALF;
994                 tp->link_config.autoneg = AUTONEG_ENABLE;
995                 tg3_setup_phy(tp, 0);
996         }
997
998         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
999
1000         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1001                 u32 mac_mode;
1002
1003                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1004                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1005                         udelay(40);
1006
1007                         mac_mode = MAC_MODE_PORT_MODE_MII;
1008
1009                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1010                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1011                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1012                 } else {
1013                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1014                 }
1015
1016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1017                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1018
1019                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1020                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1021                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1022
1023                 tw32_f(MAC_MODE, mac_mode);
1024                 udelay(100);
1025
1026                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1027                 udelay(10);
1028         }
1029
1030         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1031             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1032              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1033                 u32 base_val;
1034
1035                 base_val = tp->pci_clock_ctrl;
1036                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1037                              CLOCK_CTRL_TXCLK_DISABLE);
1038
1039                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1040                      CLOCK_CTRL_ALTCLK |
1041                      CLOCK_CTRL_PWRDOWN_PLL133);
1042                 udelay(40);
1043         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1044                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1045                 u32 newbits1, newbits2;
1046
1047                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1048                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1049                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1050                                     CLOCK_CTRL_TXCLK_DISABLE |
1051                                     CLOCK_CTRL_ALTCLK);
1052                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1053                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1054                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1055                         newbits1 = CLOCK_CTRL_625_CORE;
1056                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1057                 } else {
1058                         newbits1 = CLOCK_CTRL_ALTCLK;
1059                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1060                 }
1061
1062                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1063                 udelay(40);
1064
1065                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1066                 udelay(40);
1067
1068                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1069                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1070                         u32 newbits3;
1071
1072                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1073                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1074                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1075                                             CLOCK_CTRL_TXCLK_DISABLE |
1076                                             CLOCK_CTRL_44MHZ_CORE);
1077                         } else {
1078                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1079                         }
1080
1081                         tw32_f(TG3PCI_CLOCK_CTRL,
1082                                          tp->pci_clock_ctrl | newbits3);
1083                         udelay(40);
1084                 }
1085         }
1086
1087         tg3_frob_aux_power(tp);
1088
1089         /* Finally, set the new power state. */
1090         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1091
1092         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1093
1094         return 0;
1095 }
1096
1097 static void tg3_link_report(struct tg3 *tp)
1098 {
1099         if (!netif_carrier_ok(tp->dev)) {
1100                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1101         } else {
1102                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1103                        tp->dev->name,
1104                        (tp->link_config.active_speed == SPEED_1000 ?
1105                         1000 :
1106                         (tp->link_config.active_speed == SPEED_100 ?
1107                          100 : 10)),
1108                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1109                         "full" : "half"));
1110
1111                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1112                        "%s for RX.\n",
1113                        tp->dev->name,
1114                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1115                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1116         }
1117 }
1118
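/* Pause resolution below mirrors the standard autoneg rules: with
 * TG3_FLAG_PAUSE_AUTONEG set, both ends advertising symmetric PAUSE enables
 * RX and TX flow control; local PAUSE+ASYM against a remote advertising only
 * ASYM gives RX-only; local ASYM-only against a remote advertising
 * PAUSE+ASYM gives TX-only.  MAC_RX_MODE/MAC_TX_MODE are rewritten only if
 * the resolved setting actually changed.
 */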
1119 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1120 {
1121         u32 new_tg3_flags = 0;
1122         u32 old_rx_mode = tp->rx_mode;
1123         u32 old_tx_mode = tp->tx_mode;
1124
1125         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1126                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1127                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1128                                 if (remote_adv & LPA_PAUSE_CAP)
1129                                         new_tg3_flags |=
1130                                                 (TG3_FLAG_RX_PAUSE |
1131                                                 TG3_FLAG_TX_PAUSE);
1132                                 else if (remote_adv & LPA_PAUSE_ASYM)
1133                                         new_tg3_flags |=
1134                                                 (TG3_FLAG_RX_PAUSE);
1135                         } else {
1136                                 if (remote_adv & LPA_PAUSE_CAP)
1137                                         new_tg3_flags |=
1138                                                 (TG3_FLAG_RX_PAUSE |
1139                                                 TG3_FLAG_TX_PAUSE);
1140                         }
1141                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1142                         if ((remote_adv & LPA_PAUSE_CAP) &&
1143                         (remote_adv & LPA_PAUSE_ASYM))
1144                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1145                 }
1146
1147                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1148                 tp->tg3_flags |= new_tg3_flags;
1149         } else {
1150                 new_tg3_flags = tp->tg3_flags;
1151         }
1152
1153         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1154                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1155         else
1156                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1157
1158         if (old_rx_mode != tp->rx_mode) {
1159                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1160         }
1161         
1162         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1163                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1164         else
1165                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1166
1167         if (old_tx_mode != tp->tx_mode) {
1168                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1169         }
1170 }
1171
1172 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1173 {
1174         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1175         case MII_TG3_AUX_STAT_10HALF:
1176                 *speed = SPEED_10;
1177                 *duplex = DUPLEX_HALF;
1178                 break;
1179
1180         case MII_TG3_AUX_STAT_10FULL:
1181                 *speed = SPEED_10;
1182                 *duplex = DUPLEX_FULL;
1183                 break;
1184
1185         case MII_TG3_AUX_STAT_100HALF:
1186                 *speed = SPEED_100;
1187                 *duplex = DUPLEX_HALF;
1188                 break;
1189
1190         case MII_TG3_AUX_STAT_100FULL:
1191                 *speed = SPEED_100;
1192                 *duplex = DUPLEX_FULL;
1193                 break;
1194
1195         case MII_TG3_AUX_STAT_1000HALF:
1196                 *speed = SPEED_1000;
1197                 *duplex = DUPLEX_HALF;
1198                 break;
1199
1200         case MII_TG3_AUX_STAT_1000FULL:
1201                 *speed = SPEED_1000;
1202                 *duplex = DUPLEX_FULL;
1203                 break;
1204
1205         default:
1206                 *speed = SPEED_INVALID;
1207                 *duplex = DUPLEX_INVALID;
1208                 break;
1209         };
1210 }
1211
1212 static int tg3_phy_copper_begin(struct tg3 *tp)
1213 {
1214         u32 new_adv;
1215         int i;
1216
1217         if (tp->link_config.phy_is_low_power) {
1218                 /* Entering low power mode.  Disable gigabit and
1219                  * 100baseT advertisements.
1220                  */
1221                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1222
1223                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1224                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1225                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1226                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1227
1228                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1229         } else if (tp->link_config.speed == SPEED_INVALID) {
1230                 tp->link_config.advertising =
1231                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1232                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1233                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1234                          ADVERTISED_Autoneg | ADVERTISED_MII);
1235
1236                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1237                         tp->link_config.advertising &=
1238                                 ~(ADVERTISED_1000baseT_Half |
1239                                   ADVERTISED_1000baseT_Full);
1240
1241                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1242                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1243                         new_adv |= ADVERTISE_10HALF;
1244                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1245                         new_adv |= ADVERTISE_10FULL;
1246                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1247                         new_adv |= ADVERTISE_100HALF;
1248                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1249                         new_adv |= ADVERTISE_100FULL;
1250                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1251
1252                 if (tp->link_config.advertising &
1253                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1254                         new_adv = 0;
1255                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1256                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1257                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1258                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1259                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1260                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1261                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1262                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1263                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1264                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1265                 } else {
1266                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1267                 }
1268         } else {
1269                 /* Asking for a specific link mode. */
1270                 if (tp->link_config.speed == SPEED_1000) {
1271                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1272                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1273
1274                         if (tp->link_config.duplex == DUPLEX_FULL)
1275                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1276                         else
1277                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1278                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1279                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1280                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1281                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1282                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1283                 } else {
1284                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1285
1286                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1287                         if (tp->link_config.speed == SPEED_100) {
1288                                 if (tp->link_config.duplex == DUPLEX_FULL)
1289                                         new_adv |= ADVERTISE_100FULL;
1290                                 else
1291                                         new_adv |= ADVERTISE_100HALF;
1292                         } else {
1293                                 if (tp->link_config.duplex == DUPLEX_FULL)
1294                                         new_adv |= ADVERTISE_10FULL;
1295                                 else
1296                                         new_adv |= ADVERTISE_10HALF;
1297                         }
1298                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1299                 }
1300         }
1301
1302         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1303             tp->link_config.speed != SPEED_INVALID) {
1304                 u32 bmcr, orig_bmcr;
1305
1306                 tp->link_config.active_speed = tp->link_config.speed;
1307                 tp->link_config.active_duplex = tp->link_config.duplex;
1308
1309                 bmcr = 0;
1310                 switch (tp->link_config.speed) {
1311                 default:
1312                 case SPEED_10:
1313                         break;
1314
1315                 case SPEED_100:
1316                         bmcr |= BMCR_SPEED100;
1317                         break;
1318
1319                 case SPEED_1000:
1320                         bmcr |= TG3_BMCR_SPEED1000;
1321                         break;
1322                 };
1323
1324                 if (tp->link_config.duplex == DUPLEX_FULL)
1325                         bmcr |= BMCR_FULLDPLX;
1326
1327                 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1328                 if (bmcr != orig_bmcr) {
1329                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1330                         for (i = 0; i < 1500; i++) {
1331                                 u32 tmp;
1332
1333                                 udelay(10);
1334                                 tg3_readphy(tp, MII_BMSR, &tmp);
1335                                 tg3_readphy(tp, MII_BMSR, &tmp);
1336                                 if (!(tmp & BMSR_LSTATUS)) {
1337                                         udelay(40);
1338                                         break;
1339                                 }
1340                         }
1341                         tg3_writephy(tp, MII_BMCR, bmcr);
1342                         udelay(40);
1343                 }
1344         } else {
1345                 tg3_writephy(tp, MII_BMCR,
1346                              BMCR_ANENABLE | BMCR_ANRESTART);
1347         }
1348
1349         return 0;
1350 }
1351
1352 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1353 {
1354         int err;
1355
1356         /* Turn off tap power management. */
1357         /* Set Extended packet length bit */
1358         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1359
1360         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1361         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1362
1363         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1364         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1365
1366         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1367         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1368
1369         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1370         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1371
1372         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1373         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1374
1375         udelay(40);
1376
1377         return err;
1378 }
1379
1380 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1381 {
1382         u32 adv_reg, all_mask;
1383
1384         tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1385         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1386                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1387         if ((adv_reg & all_mask) != all_mask)
1388                 return 0;
1389         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1390                 u32 tg3_ctrl;
1391
1392                 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1393                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1394                             MII_TG3_CTRL_ADV_1000_FULL);
1395                 if ((tg3_ctrl & all_mask) != all_mask)
1396                         return 0;
1397         }
1398         return 1;
1399 }
1400
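/* Bring the copper link up or re-negotiate it: optionally reset the PHY,
 * poll BMSR for link, derive speed/duplex from the aux status register,
 * sanity check the pause advertisement, then program MAC_MODE and report
 * any carrier change.
 */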
1401 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1402 {
1403         int current_link_up;
1404         u32 bmsr, dummy;
1405         u16 current_speed;
1406         u8 current_duplex;
1407         int i, err;
1408
1409         tw32(MAC_EVENT, 0);
1410
1411         tw32_f(MAC_STATUS,
1412              (MAC_STATUS_SYNC_CHANGED |
1413               MAC_STATUS_CFG_CHANGED |
1414               MAC_STATUS_MI_COMPLETION |
1415               MAC_STATUS_LNKSTATE_CHANGED));
1416         udelay(40);
1417
1418         tp->mi_mode = MAC_MI_MODE_BASE;
1419         tw32_f(MAC_MI_MODE, tp->mi_mode);
1420         udelay(80);
1421
1422         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1423
1424         /* Some third-party PHYs need to be reset on link going
1425          * down.
1426          */
1427         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1428              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1429              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1430             netif_carrier_ok(tp->dev)) {
1431                 tg3_readphy(tp, MII_BMSR, &bmsr);
1432                 tg3_readphy(tp, MII_BMSR, &bmsr);
1433                 if (!(bmsr & BMSR_LSTATUS))
1434                         force_reset = 1;
1435         }
1436         if (force_reset)
1437                 tg3_phy_reset(tp);
1438
1439         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1440                 tg3_readphy(tp, MII_BMSR, &bmsr);
1441                 tg3_readphy(tp, MII_BMSR, &bmsr);
1442
1443                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1444                         bmsr = 0;
1445
1446                 if (!(bmsr & BMSR_LSTATUS)) {
1447                         err = tg3_init_5401phy_dsp(tp);
1448                         if (err)
1449                                 return err;
1450
1451                         tg3_readphy(tp, MII_BMSR, &bmsr);
1452                         for (i = 0; i < 1000; i++) {
1453                                 udelay(10);
1454                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1455                                 if (bmsr & BMSR_LSTATUS) {
1456                                         udelay(40);
1457                                         break;
1458                                 }
1459                         }
1460
1461                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1462                             !(bmsr & BMSR_LSTATUS) &&
1463                             tp->link_config.active_speed == SPEED_1000) {
1464                                 err = tg3_phy_reset(tp);
1465                                 if (!err)
1466                                         err = tg3_init_5401phy_dsp(tp);
1467                                 if (err)
1468                                         return err;
1469                         }
1470                 }
1471         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1472                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1473                 /* 5701 {A0,B0} CRC bug workaround */
1474                 tg3_writephy(tp, 0x15, 0x0a75);
1475                 tg3_writephy(tp, 0x1c, 0x8c68);
1476                 tg3_writephy(tp, 0x1c, 0x8d68);
1477                 tg3_writephy(tp, 0x1c, 0x8c68);
1478         }
1479
1480         /* Clear pending interrupts... */
1481         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1482         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1483
1484         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1485                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1486         else
1487                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1488
1489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1491                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1492                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1493                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1494                 else
1495                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1496         }
1497
1498         current_link_up = 0;
1499         current_speed = SPEED_INVALID;
1500         current_duplex = DUPLEX_INVALID;
1501
1502         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1503                 u32 val;
1504
1505                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1506                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1507                 if (!(val & (1 << 10))) {
1508                         val |= (1 << 10);
1509                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1510                         goto relink;
1511                 }
1512         }
1513
1514         bmsr = 0;
1515         for (i = 0; i < 100; i++) {
1516                 tg3_readphy(tp, MII_BMSR, &bmsr);
1517                 tg3_readphy(tp, MII_BMSR, &bmsr);
1518                 if (bmsr & BMSR_LSTATUS)
1519                         break;
1520                 udelay(40);
1521         }
1522
1523         if (bmsr & BMSR_LSTATUS) {
1524                 u32 aux_stat, bmcr;
1525
1526                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1527                 for (i = 0; i < 2000; i++) {
1528                         udelay(10);
1529                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1530                         if (aux_stat)
1531                                 break;
1532                 }
1533
1534                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1535                                              &current_speed,
1536                                              &current_duplex);
1537
1538                 bmcr = 0;
1539                 for (i = 0; i < 200; i++) {
1540                         tg3_readphy(tp, MII_BMCR, &bmcr);
1541                         tg3_readphy(tp, MII_BMCR, &bmcr);
1542                         if (bmcr && bmcr != 0x7fff)
1543                                 break;
1544                         udelay(10);
1545                 }
1546
1547                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1548                         if (bmcr & BMCR_ANENABLE) {
1549                                 current_link_up = 1;
1550
1551                                 /* Force autoneg restart if we are exiting
1552                                  * low power mode.
1553                                  */
1554                                 if (!tg3_copper_is_advertising_all(tp))
1555                                         current_link_up = 0;
1556                         } else {
1557                                 current_link_up = 0;
1558                         }
1559                 } else {
1560                         if (!(bmcr & BMCR_ANENABLE) &&
1561                             tp->link_config.speed == current_speed &&
1562                             tp->link_config.duplex == current_duplex) {
1563                                 current_link_up = 1;
1564                         } else {
1565                                 current_link_up = 0;
1566                         }
1567                 }
1568
1569                 tp->link_config.active_speed = current_speed;
1570                 tp->link_config.active_duplex = current_duplex;
1571         }
1572
1573         if (current_link_up == 1 &&
1574             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1575             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1576                 u32 local_adv, remote_adv;
1577
1578                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1579                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1580
1581                 tg3_readphy(tp, MII_LPA, &remote_adv);
1582                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1583
1584                 /* If we are not advertising full pause capability,
1585                  * something is wrong.  Bring the link down and reconfigure.
1586                  */
1587                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1588                         current_link_up = 0;
1589                 } else {
1590                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1591                 }
1592         }
1593 relink:
1594         if (current_link_up == 0) {
1595                 u32 tmp;
1596
1597                 tg3_phy_copper_begin(tp);
1598
1599                 tg3_readphy(tp, MII_BMSR, &tmp);
1600                 tg3_readphy(tp, MII_BMSR, &tmp);
1601                 if (tmp & BMSR_LSTATUS)
1602                         current_link_up = 1;
1603         }
1604
1605         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1606         if (current_link_up == 1) {
1607                 if (tp->link_config.active_speed == SPEED_100 ||
1608                     tp->link_config.active_speed == SPEED_10)
1609                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1610                 else
1611                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1612         } else
1613                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1614
1615         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1616         if (tp->link_config.active_duplex == DUPLEX_HALF)
1617                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1618
1619         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1621                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1622                     (current_link_up == 1 &&
1623                      tp->link_config.active_speed == SPEED_10))
1624                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1625         } else {
1626                 if (current_link_up == 1)
1627                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1628         }
1629
1630         /* ??? Without this setting Netgear GA302T PHY does not
1631          * ??? send/receive packets...
1632          */
1633         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1634             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1635                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1636                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1637                 udelay(80);
1638         }
1639
1640         tw32_f(MAC_MODE, tp->mac_mode);
1641         udelay(40);
1642
1643         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1644                 /* Polled via timer. */
1645                 tw32_f(MAC_EVENT, 0);
1646         } else {
1647                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1648         }
1649         udelay(40);
1650
1651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1652             current_link_up == 1 &&
1653             tp->link_config.active_speed == SPEED_1000 &&
1654             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1655              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1656                 udelay(120);
1657                 tw32_f(MAC_STATUS,
1658                      (MAC_STATUS_SYNC_CHANGED |
1659                       MAC_STATUS_CFG_CHANGED));
1660                 udelay(40);
1661                 tg3_write_mem(tp,
1662                               NIC_SRAM_FIRMWARE_MBOX,
1663                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1664         }
1665
1666         if (current_link_up != netif_carrier_ok(tp->dev)) {
1667                 if (current_link_up)
1668                         netif_carrier_on(tp->dev);
1669                 else
1670                         netif_carrier_off(tp->dev);
1671                 tg3_link_report(tp);
1672         }
1673
1674         return 0;
1675 }
1676
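/* State tracked by the software fiber autonegotiation machine, used when
 * hardware autoneg (TG3_FLG2_HW_AUTONEG) is not in use.
 */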
1677 struct tg3_fiber_aneginfo {
1678         int state;
1679 #define ANEG_STATE_UNKNOWN              0
1680 #define ANEG_STATE_AN_ENABLE            1
1681 #define ANEG_STATE_RESTART_INIT         2
1682 #define ANEG_STATE_RESTART              3
1683 #define ANEG_STATE_DISABLE_LINK_OK      4
1684 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1685 #define ANEG_STATE_ABILITY_DETECT       6
1686 #define ANEG_STATE_ACK_DETECT_INIT      7
1687 #define ANEG_STATE_ACK_DETECT           8
1688 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1689 #define ANEG_STATE_COMPLETE_ACK         10
1690 #define ANEG_STATE_IDLE_DETECT_INIT     11
1691 #define ANEG_STATE_IDLE_DETECT          12
1692 #define ANEG_STATE_LINK_OK              13
1693 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1694 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1695
1696         u32 flags;
1697 #define MR_AN_ENABLE            0x00000001
1698 #define MR_RESTART_AN           0x00000002
1699 #define MR_AN_COMPLETE          0x00000004
1700 #define MR_PAGE_RX              0x00000008
1701 #define MR_NP_LOADED            0x00000010
1702 #define MR_TOGGLE_TX            0x00000020
1703 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1704 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1705 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1706 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1707 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1708 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1709 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1710 #define MR_TOGGLE_RX            0x00002000
1711 #define MR_NP_RX                0x00004000
1712
1713 #define MR_LINK_OK              0x80000000
1714
1715         unsigned long link_time, cur_time;
1716
1717         u32 ability_match_cfg;
1718         int ability_match_count;
1719
1720         char ability_match, idle_match, ack_match;
1721
1722         u32 txconfig, rxconfig;
1723 #define ANEG_CFG_NP             0x00000080
1724 #define ANEG_CFG_ACK            0x00000040
1725 #define ANEG_CFG_RF2            0x00000020
1726 #define ANEG_CFG_RF1            0x00000010
1727 #define ANEG_CFG_PS2            0x00000001
1728 #define ANEG_CFG_PS1            0x00008000
1729 #define ANEG_CFG_HD             0x00004000
1730 #define ANEG_CFG_FD             0x00002000
1731 #define ANEG_CFG_INVAL          0x00001f06
1732
1733 };
1734 #define ANEG_OK         0
1735 #define ANEG_DONE       1
1736 #define ANEG_TIMER_ENAB 2
1737 #define ANEG_FAILED     -1
1738
1739 #define ANEG_STATE_SETTLE_TIME  10000
1740
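/* Run one step of the software fiber autoneg state machine.  Samples the
 * received config word from MAC_RX_AUTO_NEG, advances ap->state, and
 * returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is in progress,
 * ANEG_DONE on completion, or ANEG_FAILED on error.
 */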
1741 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1742                                    struct tg3_fiber_aneginfo *ap)
1743 {
1744         unsigned long delta;
1745         u32 rx_cfg_reg;
1746         int ret;
1747
1748         if (ap->state == ANEG_STATE_UNKNOWN) {
1749                 ap->rxconfig = 0;
1750                 ap->link_time = 0;
1751                 ap->cur_time = 0;
1752                 ap->ability_match_cfg = 0;
1753                 ap->ability_match_count = 0;
1754                 ap->ability_match = 0;
1755                 ap->idle_match = 0;
1756                 ap->ack_match = 0;
1757         }
1758         ap->cur_time++;
1759
1760         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1761                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1762
1763                 if (rx_cfg_reg != ap->ability_match_cfg) {
1764                         ap->ability_match_cfg = rx_cfg_reg;
1765                         ap->ability_match = 0;
1766                         ap->ability_match_count = 0;
1767                 } else {
1768                         if (++ap->ability_match_count > 1) {
1769                                 ap->ability_match = 1;
1770                                 ap->ability_match_cfg = rx_cfg_reg;
1771                         }
1772                 }
1773                 if (rx_cfg_reg & ANEG_CFG_ACK)
1774                         ap->ack_match = 1;
1775                 else
1776                         ap->ack_match = 0;
1777
1778                 ap->idle_match = 0;
1779         } else {
1780                 ap->idle_match = 1;
1781                 ap->ability_match_cfg = 0;
1782                 ap->ability_match_count = 0;
1783                 ap->ability_match = 0;
1784                 ap->ack_match = 0;
1785
1786                 rx_cfg_reg = 0;
1787         }
1788
1789         ap->rxconfig = rx_cfg_reg;
1790         ret = ANEG_OK;
1791
1792         switch(ap->state) {
1793         case ANEG_STATE_UNKNOWN:
1794                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1795                         ap->state = ANEG_STATE_AN_ENABLE;
1796
1797                 /* fallthru */
1798         case ANEG_STATE_AN_ENABLE:
1799                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1800                 if (ap->flags & MR_AN_ENABLE) {
1801                         ap->link_time = 0;
1802                         ap->cur_time = 0;
1803                         ap->ability_match_cfg = 0;
1804                         ap->ability_match_count = 0;
1805                         ap->ability_match = 0;
1806                         ap->idle_match = 0;
1807                         ap->ack_match = 0;
1808
1809                         ap->state = ANEG_STATE_RESTART_INIT;
1810                 } else {
1811                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1812                 }
1813                 break;
1814
1815         case ANEG_STATE_RESTART_INIT:
1816                 ap->link_time = ap->cur_time;
1817                 ap->flags &= ~(MR_NP_LOADED);
1818                 ap->txconfig = 0;
1819                 tw32(MAC_TX_AUTO_NEG, 0);
1820                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1821                 tw32_f(MAC_MODE, tp->mac_mode);
1822                 udelay(40);
1823
1824                 ret = ANEG_TIMER_ENAB;
1825                 ap->state = ANEG_STATE_RESTART;
1826
1827                 /* fallthru */
1828         case ANEG_STATE_RESTART:
1829                 delta = ap->cur_time - ap->link_time;
1830                 if (delta > ANEG_STATE_SETTLE_TIME) {
1831                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1832                 } else {
1833                         ret = ANEG_TIMER_ENAB;
1834                 }
1835                 break;
1836
1837         case ANEG_STATE_DISABLE_LINK_OK:
1838                 ret = ANEG_DONE;
1839                 break;
1840
1841         case ANEG_STATE_ABILITY_DETECT_INIT:
1842                 ap->flags &= ~(MR_TOGGLE_TX);
1843                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1844                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1845                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1846                 tw32_f(MAC_MODE, tp->mac_mode);
1847                 udelay(40);
1848
1849                 ap->state = ANEG_STATE_ABILITY_DETECT;
1850                 break;
1851
1852         case ANEG_STATE_ABILITY_DETECT:
1853                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1854                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1855                 }
1856                 break;
1857
1858         case ANEG_STATE_ACK_DETECT_INIT:
1859                 ap->txconfig |= ANEG_CFG_ACK;
1860                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1861                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1862                 tw32_f(MAC_MODE, tp->mac_mode);
1863                 udelay(40);
1864
1865                 ap->state = ANEG_STATE_ACK_DETECT;
1866
1867                 /* fallthru */
1868         case ANEG_STATE_ACK_DETECT:
1869                 if (ap->ack_match != 0) {
1870                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1871                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1872                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1873                         } else {
1874                                 ap->state = ANEG_STATE_AN_ENABLE;
1875                         }
1876                 } else if (ap->ability_match != 0 &&
1877                            ap->rxconfig == 0) {
1878                         ap->state = ANEG_STATE_AN_ENABLE;
1879                 }
1880                 break;
1881
1882         case ANEG_STATE_COMPLETE_ACK_INIT:
1883                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1884                         ret = ANEG_FAILED;
1885                         break;
1886                 }
1887                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1888                                MR_LP_ADV_HALF_DUPLEX |
1889                                MR_LP_ADV_SYM_PAUSE |
1890                                MR_LP_ADV_ASYM_PAUSE |
1891                                MR_LP_ADV_REMOTE_FAULT1 |
1892                                MR_LP_ADV_REMOTE_FAULT2 |
1893                                MR_LP_ADV_NEXT_PAGE |
1894                                MR_TOGGLE_RX |
1895                                MR_NP_RX);
1896                 if (ap->rxconfig & ANEG_CFG_FD)
1897                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1898                 if (ap->rxconfig & ANEG_CFG_HD)
1899                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1900                 if (ap->rxconfig & ANEG_CFG_PS1)
1901                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1902                 if (ap->rxconfig & ANEG_CFG_PS2)
1903                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1904                 if (ap->rxconfig & ANEG_CFG_RF1)
1905                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1906                 if (ap->rxconfig & ANEG_CFG_RF2)
1907                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1908                 if (ap->rxconfig & ANEG_CFG_NP)
1909                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1910
1911                 ap->link_time = ap->cur_time;
1912
1913                 ap->flags ^= (MR_TOGGLE_TX);
1914                 if (ap->rxconfig & 0x0008)
1915                         ap->flags |= MR_TOGGLE_RX;
1916                 if (ap->rxconfig & ANEG_CFG_NP)
1917                         ap->flags |= MR_NP_RX;
1918                 ap->flags |= MR_PAGE_RX;
1919
1920                 ap->state = ANEG_STATE_COMPLETE_ACK;
1921                 ret = ANEG_TIMER_ENAB;
1922                 break;
1923
1924         case ANEG_STATE_COMPLETE_ACK:
1925                 if (ap->ability_match != 0 &&
1926                     ap->rxconfig == 0) {
1927                         ap->state = ANEG_STATE_AN_ENABLE;
1928                         break;
1929                 }
1930                 delta = ap->cur_time - ap->link_time;
1931                 if (delta > ANEG_STATE_SETTLE_TIME) {
1932                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1933                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1934                         } else {
1935                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1936                                     !(ap->flags & MR_NP_RX)) {
1937                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1938                                 } else {
1939                                         ret = ANEG_FAILED;
1940                                 }
1941                         }
1942                 }
1943                 break;
1944
1945         case ANEG_STATE_IDLE_DETECT_INIT:
1946                 ap->link_time = ap->cur_time;
1947                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1948                 tw32_f(MAC_MODE, tp->mac_mode);
1949                 udelay(40);
1950
1951                 ap->state = ANEG_STATE_IDLE_DETECT;
1952                 ret = ANEG_TIMER_ENAB;
1953                 break;
1954
1955         case ANEG_STATE_IDLE_DETECT:
1956                 if (ap->ability_match != 0 &&
1957                     ap->rxconfig == 0) {
1958                         ap->state = ANEG_STATE_AN_ENABLE;
1959                         break;
1960                 }
1961                 delta = ap->cur_time - ap->link_time;
1962                 if (delta > ANEG_STATE_SETTLE_TIME) {
1963                         /* XXX another gem from the Broadcom driver :( */
1964                         ap->state = ANEG_STATE_LINK_OK;
1965                 }
1966                 break;
1967
1968         case ANEG_STATE_LINK_OK:
1969                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1970                 ret = ANEG_DONE;
1971                 break;
1972
1973         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1974                 /* ??? unimplemented */
1975                 break;
1976
1977         case ANEG_STATE_NEXT_PAGE_WAIT:
1978                 /* ??? unimplemented */
1979                 break;
1980
1981         default:
1982                 ret = ANEG_FAILED;
1983                 break;
1984         }
1985
1986         return ret;
1987 }
1988
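/* Step the software autoneg state machine roughly once per microsecond
 * for up to ~195 ms.  On return *flags holds the negotiated MR_* bits;
 * returns 1 when negotiation finished with a usable link, else 0.
 */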
1989 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
1990 {
1991         int res = 0;
1992         struct tg3_fiber_aneginfo aninfo;
1993         int status = ANEG_FAILED;
1994         unsigned int tick;
1995         u32 tmp;
1996
1997         tw32_f(MAC_TX_AUTO_NEG, 0);
1998
1999         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2000         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2001         udelay(40);
2002
2003         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2004         udelay(40);
2005
2006         memset(&aninfo, 0, sizeof(aninfo));
2007         aninfo.flags |= MR_AN_ENABLE;
2008         aninfo.state = ANEG_STATE_UNKNOWN;
2009         aninfo.cur_time = 0;
2010         tick = 0;
2011         while (++tick < 195000) {
2012                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2013                 if (status == ANEG_DONE || status == ANEG_FAILED)
2014                         break;
2015
2016                 udelay(1);
2017         }
2018
2019         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2020         tw32_f(MAC_MODE, tp->mac_mode);
2021         udelay(40);
2022
2023         *flags = aninfo.flags;
2024
2025         if (status == ANEG_DONE &&
2026             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2027                              MR_LP_ADV_FULL_DUPLEX)))
2028                 res = 1;
2029
2030         return res;
2031 }
2032
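/* One-time bring-up sequence for the BCM8002 SerDes PHY: set the PLL
 * lock range, soft-reset the PHY, program the PMA/channel registers,
 * and pulse POR before deselecting the channel register again.
 */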
2033 static void tg3_init_bcm8002(struct tg3 *tp)
2034 {
2035         u32 mac_status = tr32(MAC_STATUS);
2036         int i;
2037
2038         /* Reset when initializing for the first time or when we have a link. */
2039         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2040             !(mac_status & MAC_STATUS_PCS_SYNCED))
2041                 return;
2042
2043         /* Set PLL lock range. */
2044         tg3_writephy(tp, 0x16, 0x8007);
2045
2046         /* SW reset */
2047         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2048
2049         /* Wait for reset to complete. */
2050         /* XXX schedule_timeout() ... */
2051         for (i = 0; i < 500; i++)
2052                 udelay(10);
2053
2054         /* Config mode; select PMA/Ch 1 regs. */
2055         tg3_writephy(tp, 0x10, 0x8411);
2056
2057         /* Enable auto-lock and comdet, select txclk for tx. */
2058         tg3_writephy(tp, 0x11, 0x0a10);
2059
2060         tg3_writephy(tp, 0x18, 0x00a0);
2061         tg3_writephy(tp, 0x16, 0x41ff);
2062
2063         /* Assert and deassert POR. */
2064         tg3_writephy(tp, 0x13, 0x0400);
2065         udelay(40);
2066         tg3_writephy(tp, 0x13, 0x0000);
2067
2068         tg3_writephy(tp, 0x11, 0x0a50);
2069         udelay(40);
2070         tg3_writephy(tp, 0x11, 0x0a10);
2071
2072         /* Wait for signal to stabilize */
2073         /* XXX schedule_timeout() ... */
2074         for (i = 0; i < 15000; i++)
2075                 udelay(10);
2076
2077         /* Deselect the channel register so we can read the PHYID
2078          * later.
2079          */
2080         tg3_writephy(tp, 0x10, 0x8011);
2081 }
2082
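/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Programs SG_DIG_CTRL (with a MAC_SERDES_CFG workaround on chips other
 * than 5704 A0/A1), interprets SG_DIG_STATUS and the MAC status, sets up
 * flow control, and returns 1 if the link is up.
 */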
2083 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2084 {
2085         u32 sg_dig_ctrl, sg_dig_status;
2086         u32 serdes_cfg, expected_sg_dig_ctrl;
2087         int workaround, port_a;
2088         int current_link_up;
2089
2090         serdes_cfg = 0;
2091         expected_sg_dig_ctrl = 0;
2092         workaround = 0;
2093         port_a = 1;
2094         current_link_up = 0;
2095
2096         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2097             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2098                 workaround = 1;
2099                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2100                         port_a = 0;
2101
2102                 serdes_cfg = tr32(MAC_SERDES_CFG) &
2103                         ((1 << 23) | (1 << 22) | (1 << 21) | (1 << 20));
2104         }
2105
2106         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2107
2108         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2109                 if (sg_dig_ctrl & (1 << 31)) {
2110                         if (workaround) {
2111                                 u32 val = serdes_cfg;
2112
2113                                 if (port_a)
2114                                         val |= 0xc010880;
2115                                 else
2116                                         val |= 0x4010880;
2117                                 tw32_f(MAC_SERDES_CFG, val);
2118                         }
2119                         tw32_f(SG_DIG_CTRL, 0x01388400);
2120                 }
2121                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2122                         tg3_setup_flow_control(tp, 0, 0);
2123                         current_link_up = 1;
2124                 }
2125                 goto out;
2126         }
2127
2128         /* Want auto-negotiation.  */
2129         expected_sg_dig_ctrl = 0x81388400;
2130
2131         /* Pause capability */
2132         expected_sg_dig_ctrl |= (1 << 11);
2133
2134         /* Asymmetric pause */
2135         expected_sg_dig_ctrl |= (1 << 12);
2136
2137         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2138                 if (workaround)
2139                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011880);
2140                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2141                 udelay(5);
2142                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2143
2144                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2145         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2146                                  MAC_STATUS_SIGNAL_DET)) {
2147                 sg_dig_status = tr32(SG_DIG_STATUS);
2148
2149                 if ((sg_dig_status & (1 << 1)) &&
2150                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2151                         u32 local_adv, remote_adv;
2152
2153                         local_adv = ADVERTISE_PAUSE_CAP;
2154                         remote_adv = 0;
2155                         if (sg_dig_status & (1 << 19))
2156                                 remote_adv |= LPA_PAUSE_CAP;
2157                         if (sg_dig_status & (1 << 20))
2158                                 remote_adv |= LPA_PAUSE_ASYM;
2159
2160                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2161                         current_link_up = 1;
2162                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2163                 } else if (!(sg_dig_status & (1 << 1))) {
2164                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2165                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2166                         else {
2167                                 if (workaround) {
2168                                         u32 val = serdes_cfg;
2169
2170                                         if (port_a)
2171                                                 val |= 0xc010880;
2172                                         else
2173                                                 val |= 0x4010880;
2174
2175                                         tw32_f(MAC_SERDES_CFG, val);
2176                                 }
2177
2178                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2179                                 udelay(40);
2180
2181                                 mac_status = tr32(MAC_STATUS);
2182                                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2183                                         tg3_setup_flow_control(tp, 0, 0);
2184                                         current_link_up = 1;
2185                                 }
2186                         }
2187                 }
2188         }
2189
2190 out:
2191         return current_link_up;
2192 }
2193
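/* Fiber link setup without the hardware autoneg block: either run the
 * software autoneg state machine via fiber_autoneg() or force a 1000FD
 * link, then return 1 if the link came up.
 */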
2194 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2195 {
2196         int current_link_up = 0;
2197
2198         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2199                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2200                 goto out;
2201         }
2202
2203         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2204                 u32 flags;
2205                 int i;
2206
2207                 if (fiber_autoneg(tp, &flags)) {
2208                         u32 local_adv, remote_adv;
2209
2210                         local_adv = ADVERTISE_PAUSE_CAP;
2211                         remote_adv = 0;
2212                         if (flags & MR_LP_ADV_SYM_PAUSE)
2213                                 remote_adv |= LPA_PAUSE_CAP;
2214                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2215                                 remote_adv |= LPA_PAUSE_ASYM;
2216
2217                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2218
2219                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2220                         current_link_up = 1;
2221                 }
2222                 for (i = 0; i < 30; i++) {
2223                         udelay(20);
2224                         tw32_f(MAC_STATUS,
2225                                (MAC_STATUS_SYNC_CHANGED |
2226                                 MAC_STATUS_CFG_CHANGED));
2227                         udelay(40);
2228                         if ((tr32(MAC_STATUS) &
2229                              (MAC_STATUS_SYNC_CHANGED |
2230                               MAC_STATUS_CFG_CHANGED)) == 0)
2231                                 break;
2232                 }
2233
2234                 mac_status = tr32(MAC_STATUS);
2235                 if (current_link_up == 0 &&
2236                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2237                     !(mac_status & MAC_STATUS_RCVD_CFG))
2238                         current_link_up = 1;
2239         } else {
2240                 /* Forcing 1000FD link up. */
2241                 current_link_up = 1;
2242                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2243
2244                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2245                 udelay(40);
2246         }
2247
2248 out:
2249         return current_link_up;
2250 }
2251
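/* Top-level link setup for fiber (TBI) ports: bail out early if the link
 * is already in the expected state, otherwise put the MAC into TBI mode,
 * run hardware or software autoneg, update the LED override bits, and
 * report any carrier or flow-control change.
 */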
2252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2253 {
2254         u32 orig_pause_cfg;
2255         u16 orig_active_speed;
2256         u8 orig_active_duplex;
2257         u32 mac_status;
2258         int current_link_up;
2259         int i;
2260
2261         orig_pause_cfg =
2262                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2263                                   TG3_FLAG_TX_PAUSE));
2264         orig_active_speed = tp->link_config.active_speed;
2265         orig_active_duplex = tp->link_config.active_duplex;
2266
2267         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2268             netif_carrier_ok(tp->dev) &&
2269             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2270                 mac_status = tr32(MAC_STATUS);
2271                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2272                                MAC_STATUS_SIGNAL_DET |
2273                                MAC_STATUS_CFG_CHANGED |
2274                                MAC_STATUS_RCVD_CFG);
2275                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2276                                    MAC_STATUS_SIGNAL_DET)) {
2277                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2278                                             MAC_STATUS_CFG_CHANGED));
2279                         return 0;
2280                 }
2281         }
2282
2283         tw32_f(MAC_TX_AUTO_NEG, 0);
2284
2285         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2286         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2287         tw32_f(MAC_MODE, tp->mac_mode);
2288         udelay(40);
2289
2290         if (tp->phy_id == PHY_ID_BCM8002)
2291                 tg3_init_bcm8002(tp);
2292
2293         /* Enable link change events even when polling the serdes.  */
2294         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2295         udelay(40);
2296
2297         current_link_up = 0;
2298         mac_status = tr32(MAC_STATUS);
2299
2300         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2301                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2302         else
2303                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2304
2305         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2306         tw32_f(MAC_MODE, tp->mac_mode);
2307         udelay(40);
2308
2309         tp->hw_status->status =
2310                 (SD_STATUS_UPDATED |
2311                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2312
2313         for (i = 0; i < 100; i++) {
2314                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2315                                     MAC_STATUS_CFG_CHANGED));
2316                 udelay(5);
2317                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2318                                          MAC_STATUS_CFG_CHANGED)) == 0)
2319                         break;
2320         }
2321
2322         mac_status = tr32(MAC_STATUS);
2323         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2324                 current_link_up = 0;
2325                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2326                         tw32_f(MAC_MODE, (tp->mac_mode |
2327                                           MAC_MODE_SEND_CONFIGS));
2328                         udelay(1);
2329                         tw32_f(MAC_MODE, tp->mac_mode);
2330                 }
2331         }
2332
2333         if (current_link_up == 1) {
2334                 tp->link_config.active_speed = SPEED_1000;
2335                 tp->link_config.active_duplex = DUPLEX_FULL;
2336                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2337                                     LED_CTRL_LNKLED_OVERRIDE |
2338                                     LED_CTRL_1000MBPS_ON));
2339         } else {
2340                 tp->link_config.active_speed = SPEED_INVALID;
2341                 tp->link_config.active_duplex = DUPLEX_INVALID;
2342                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2343                                     LED_CTRL_LNKLED_OVERRIDE |
2344                                     LED_CTRL_TRAFFIC_OVERRIDE));
2345         }
2346
2347         if (current_link_up != netif_carrier_ok(tp->dev)) {
2348                 if (current_link_up)
2349                         netif_carrier_on(tp->dev);
2350                 else
2351                         netif_carrier_off(tp->dev);
2352                 tg3_link_report(tp);
2353         } else {
2354                 u32 now_pause_cfg =
2355                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2356                                          TG3_FLAG_TX_PAUSE);
2357                 if (orig_pause_cfg != now_pause_cfg ||
2358                     orig_active_speed != tp->link_config.active_speed ||
2359                     orig_active_duplex != tp->link_config.active_duplex)
2360                         tg3_link_report(tp);
2361         }
2362
2363         return 0;
2364 }
2365
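/* Common link setup entry point: dispatch to the fiber or copper routine,
 * then program MAC_TX_LENGTHS for the negotiated speed/duplex and enable
 * or disable statistics coalescing based on carrier state.
 */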
2366 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2367 {
2368         int err;
2369
2370         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2371                 err = tg3_setup_fiber_phy(tp, force_reset);
2372         } else {
2373                 err = tg3_setup_copper_phy(tp, force_reset);
2374         }
2375
2376         if (tp->link_config.active_speed == SPEED_1000 &&
2377             tp->link_config.active_duplex == DUPLEX_HALF)
2378                 tw32(MAC_TX_LENGTHS,
2379                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2380                       (6 << TX_LENGTHS_IPG_SHIFT) |
2381                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2382         else
2383                 tw32(MAC_TX_LENGTHS,
2384                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2385                       (6 << TX_LENGTHS_IPG_SHIFT) |
2386                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2387
2388         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2389             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2390                 if (netif_carrier_ok(tp->dev)) {
2391                         tw32(HOSTCC_STAT_COAL_TICKS,
2392                              DEFAULT_STAT_COAL_TICKS);
2393                 } else {
2394                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2395                 }
2396         }
2397
2398         return err;
2399 }
2400
2401 /* Tigon3 never reports partial packet sends.  So we do not
2402  * need special logic to handle SKBs that have not had all
2403  * of their frags sent yet, like SunGEM does.
2404  */
2405 static void tg3_tx(struct tg3 *tp)
2406 {
2407         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2408         u32 sw_idx = tp->tx_cons;
2409
2410         while (sw_idx != hw_idx) {
2411                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2412                 struct sk_buff *skb = ri->skb;
2413                 int i;
2414
2415                 if (unlikely(skb == NULL))
2416                         BUG();
2417
2418                 pci_unmap_single(tp->pdev,
2419                                  pci_unmap_addr(ri, mapping),
2420                                  skb_headlen(skb),
2421                                  PCI_DMA_TODEVICE);
2422
2423                 ri->skb = NULL;
2424
2425                 sw_idx = NEXT_TX(sw_idx);
2426
2427                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2428                         if (unlikely(sw_idx == hw_idx))
2429                                 BUG();
2430
2431                         ri = &tp->tx_buffers[sw_idx];
2432                         if (unlikely(ri->skb != NULL))
2433                                 BUG();
2434
2435                         pci_unmap_page(tp->pdev,
2436                                        pci_unmap_addr(ri, mapping),
2437                                        skb_shinfo(skb)->frags[i].size,
2438                                        PCI_DMA_TODEVICE);
2439
2440                         sw_idx = NEXT_TX(sw_idx);
2441                 }
2442
2443                 dev_kfree_skb_irq(skb);
2444         }
2445
2446         tp->tx_cons = sw_idx;
2447
2448         if (netif_queue_stopped(tp->dev) &&
2449             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2450                 netif_wake_queue(tp->dev);
2451 }
2452
2453 /* Returns size of skb allocated or < 0 on error.
2454  *
2455  * We only need to fill in the address because the other members
2456  * of the RX descriptor are invariant, see tg3_init_rings.
2457  *
2458  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2459  * posting buffers we only dirty the first cache line of the RX
2460  * descriptor (containing the address).  Whereas for the RX status
2461  * buffers the cpu only reads the last cacheline of the RX descriptor
2462  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2463  */
2464 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2465                             int src_idx, u32 dest_idx_unmasked)
2466 {
2467         struct tg3_rx_buffer_desc *desc;
2468         struct ring_info *map, *src_map;
2469         struct sk_buff *skb;
2470         dma_addr_t mapping;
2471         int skb_size, dest_idx;
2472
2473         src_map = NULL;
2474         switch (opaque_key) {
2475         case RXD_OPAQUE_RING_STD:
2476                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2477                 desc = &tp->rx_std[dest_idx];
2478                 map = &tp->rx_std_buffers[dest_idx];
2479                 if (src_idx >= 0)
2480                         src_map = &tp->rx_std_buffers[src_idx];
2481                 skb_size = RX_PKT_BUF_SZ;
2482                 break;
2483
2484         case RXD_OPAQUE_RING_JUMBO:
2485                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2486                 desc = &tp->rx_jumbo[dest_idx];
2487                 map = &tp->rx_jumbo_buffers[dest_idx];
2488                 if (src_idx >= 0)
2489                         src_map = &tp->rx_jumbo_buffers[src_idx];
2490                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2491                 break;
2492
2493         default:
2494                 return -EINVAL;
2495         }
2496
2497         /* Do not overwrite any of the map or rp information
2498          * until we are sure we can commit to a new buffer.
2499          *
2500          * Callers depend upon this behavior and assume that
2501          * we leave everything unchanged if we fail.
2502          */
2503         skb = dev_alloc_skb(skb_size);
2504         if (skb == NULL)
2505                 return -ENOMEM;
2506
2507         skb->dev = tp->dev;
2508         skb_reserve(skb, tp->rx_offset);
2509
2510         mapping = pci_map_single(tp->pdev, skb->data,
2511                                  skb_size - tp->rx_offset,
2512                                  PCI_DMA_FROMDEVICE);
2513
2514         map->skb = skb;
2515         pci_unmap_addr_set(map, mapping, mapping);
2516
2517         if (src_map != NULL)
2518                 src_map->skb = NULL;
2519
2520         desc->addr_hi = ((u64)mapping >> 32);
2521         desc->addr_lo = ((u64)mapping & 0xffffffff);
2522
2523         return skb_size;
2524 }
2525
2526 /* We only need to move over in the address because the other
2527  * members of the RX descriptor are invariant.  See notes above
2528  * tg3_alloc_rx_skb for full details.
2529  */
2530 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2531                            int src_idx, u32 dest_idx_unmasked)
2532 {
2533         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2534         struct ring_info *src_map, *dest_map;
2535         int dest_idx;
2536
2537         switch (opaque_key) {
2538         case RXD_OPAQUE_RING_STD:
2539                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2540                 dest_desc = &tp->rx_std[dest_idx];
2541                 dest_map = &tp->rx_std_buffers[dest_idx];
2542                 src_desc = &tp->rx_std[src_idx];
2543                 src_map = &tp->rx_std_buffers[src_idx];
2544                 break;
2545
2546         case RXD_OPAQUE_RING_JUMBO:
2547                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2548                 dest_desc = &tp->rx_jumbo[dest_idx];
2549                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2550                 src_desc = &tp->rx_jumbo[src_idx];
2551                 src_map = &tp->rx_jumbo_buffers[src_idx];
2552                 break;
2553
2554         default:
2555                 return;
2556         }
2557
2558         dest_map->skb = src_map->skb;
2559         pci_unmap_addr_set(dest_map, mapping,
2560                            pci_unmap_addr(src_map, mapping));
2561         dest_desc->addr_hi = src_desc->addr_hi;
2562         dest_desc->addr_lo = src_desc->addr_lo;
2563
2564         src_map->skb = NULL;
2565 }
2566
2567 #if TG3_VLAN_TAG_USED
2568 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2569 {
2570         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2571 }
2572 #endif
2573
2574 /* The RX ring scheme is composed of multiple rings which post fresh
2575  * buffers to the chip, and one special ring the chip uses to report
2576  * status back to the host.
2577  *
2578  * The special ring reports the status of received packets to the
2579  * host.  The chip does not write into the original descriptor the
2580  * RX buffer was obtained from.  The chip simply takes the original
2581  * descriptor as provided by the host, updates the status and length
2582  * field, then writes this into the next status ring entry.
2583  *
2584  * Each ring the host uses to post buffers to the chip is described
2585  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2586  * it is first placed into the on-chip ram.  When the packet's length
2587  * is known, it walks down the TG3_BDINFO entries to select the ring.
2588  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2589  * whose MAXLEN covers the new packet's length is chosen.
2590  *
2591  * The "separate ring for rx status" scheme may sound queer, but it makes
2592  * sense from a cache coherency perspective.  If only the host writes
2593  * to the buffer post rings, and only the chip writes to the rx status
2594  * rings, then cache lines never move beyond shared-modified state.
2595  * If both the host and chip were to write into the same ring, cache line
2596  * eviction could occur since both entities want it in an exclusive state.
2597  */
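/* Concretely, in the loop below hw_idx is the chip's producer index into
 * the status (return) ring and sw_idx is our consumer index.  Each reaped
 * descriptor's opaque cookie identifies the producer ring (std or jumbo)
 * its buffer came from, so that ring's post pointer can be advanced and
 * written back to the chip's mailbox once the loop finishes.
 */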
2598 static int tg3_rx(struct tg3 *tp, int budget)
2599 {
2600         u32 work_mask;
2601         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2602         u16 hw_idx, sw_idx;
2603         int received;
2604
2605         hw_idx = tp->hw_status->idx[0].rx_producer;
2606         /*
2607          * We need to order the read of hw_idx and the read of
2608          * the opaque cookie.
2609          */
2610         rmb();
2611         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2612         work_mask = 0;
2613         received = 0;
2614         while (sw_idx != hw_idx && budget > 0) {
2615                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2616                 unsigned int len;
2617                 struct sk_buff *skb;
2618                 dma_addr_t dma_addr;
2619                 u32 opaque_key, desc_idx, *post_ptr;
2620
2621                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2622                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2623                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2624                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2625                                                   mapping);
2626                         skb = tp->rx_std_buffers[desc_idx].skb;
2627                         post_ptr = &tp->rx_std_ptr;
2628                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2629                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2630                                                   mapping);
2631                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2632                         post_ptr = &tp->rx_jumbo_ptr;
2633                 }
2634                 else {
2635                         goto next_pkt_nopost;
2636                 }
2637
2638                 work_mask |= opaque_key;
2639
2640                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2641                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2642                 drop_it:
2643                         tg3_recycle_rx(tp, opaque_key,
2644                                        desc_idx, *post_ptr);
2645                 drop_it_no_recycle:
2646                         /* Other statistics kept track of by card. */
2647                         tp->net_stats.rx_dropped++;
2648                         goto next_pkt;
2649                 }
2650
2651                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2652
2653                 if (len > RX_COPY_THRESHOLD) {
2654                         int skb_size;
2655
2656                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2657                                                     desc_idx, *post_ptr);
2658                         if (skb_size < 0)
2659                                 goto drop_it;
2660
2661                         pci_unmap_single(tp->pdev, dma_addr,
2662                                          skb_size - tp->rx_offset,
2663                                          PCI_DMA_FROMDEVICE);
2664
2665                         skb_put(skb, len);
2666                 } else {
2667                         struct sk_buff *copy_skb;
2668
2669                         tg3_recycle_rx(tp, opaque_key,
2670                                        desc_idx, *post_ptr);
2671
2672                         copy_skb = dev_alloc_skb(len + 2);
2673                         if (copy_skb == NULL)
2674                                 goto drop_it_no_recycle;
2675
2676                         copy_skb->dev = tp->dev;
2677                         skb_reserve(copy_skb, 2);
2678                         skb_put(copy_skb, len);
2679                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2680                         memcpy(copy_skb->data, skb->data, len);
2681                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2682
2683                         /* We'll reuse the original ring buffer. */
2684                         skb = copy_skb;
2685                 }
2686
2687                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2688                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2689                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2690                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2691                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2692                 else
2693                         skb->ip_summed = CHECKSUM_NONE;
2694
2695                 skb->protocol = eth_type_trans(skb, tp->dev);
2696 #if TG3_VLAN_TAG_USED
2697                 if (tp->vlgrp != NULL &&
2698                     desc->type_flags & RXD_FLAG_VLAN) {
2699                         tg3_vlan_rx(tp, skb,
2700                                     desc->err_vlan & RXD_VLAN_MASK);
2701                 } else
2702 #endif
2703                         netif_receive_skb(skb);
2704
2705                 tp->dev->last_rx = jiffies;
2706                 received++;
2707                 budget--;
2708
2709 next_pkt:
2710                 (*post_ptr)++;
2711 next_pkt_nopost:
2712                 rx_rcb_ptr++;
2713                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2714         }
2715
2716         /* ACK the status ring. */
2717         tp->rx_rcb_ptr = rx_rcb_ptr;
2718         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2719                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2720
2721         /* Refill RX ring(s). */
2722         if (work_mask & RXD_OPAQUE_RING_STD) {
2723                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2724                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2725                              sw_idx);
2726         }
2727         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2728                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2729                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2730                              sw_idx);
2731         }
2732
2733         return received;
2734 }
2735
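/* NAPI poll callback: handle link-change events reported via the status
 * block (unless link state is polled), reap completed TX descriptors
 * under tx_lock, receive packets within the NAPI budget, and re-enable
 * chip interrupts once no work remains.
 */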
2736 static int tg3_poll(struct net_device *netdev, int *budget)
2737 {
2738         struct tg3 *tp = netdev_priv(netdev);
2739         struct tg3_hw_status *sblk = tp->hw_status;
2740         unsigned long flags;
2741         int done;
2742
2743         spin_lock_irqsave(&tp->lock, flags);
2744
2745         /* handle link change and other phy events */
2746         if (!(tp->tg3_flags &
2747               (TG3_FLAG_USE_LINKCHG_REG |
2748                TG3_FLAG_POLL_SERDES))) {
2749                 if (sblk->status & SD_STATUS_LINK_CHG) {
2750                         sblk->status = SD_STATUS_UPDATED |
2751                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2752                         tg3_setup_phy(tp, 0);
2753                 }
2754         }
2755
2756         /* run TX completion thread */
2757         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2758                 spin_lock(&tp->tx_lock);
2759                 tg3_tx(tp);
2760                 spin_unlock(&tp->tx_lock);
2761         }
2762
2763         spin_unlock_irqrestore(&tp->lock, flags);
2764
2765         /* run RX thread, within the bounds set by NAPI.
2766          * All RX "locking" is done by ensuring outside
2767          * code synchronizes with dev->poll()
2768          */
2769         done = 1;
2770         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2771                 int orig_budget = *budget;
2772                 int work_done;
2773
2774                 if (orig_budget > netdev->quota)
2775                         orig_budget = netdev->quota;
2776
2777                 work_done = tg3_rx(tp, orig_budget);
2778
2779                 *budget -= work_done;
2780                 netdev->quota -= work_done;
2781
2782                 if (work_done >= orig_budget)
2783                         done = 0;
2784         }
2785
2786         /* if no more work, tell net stack and NIC we're done */
2787         if (done) {
2788                 spin_lock_irqsave(&tp->lock, flags);
2789                 __netif_rx_complete(netdev);
2790                 tg3_enable_ints(tp);
2791                 spin_unlock_irqrestore(&tp->lock, flags);
2792         }
2793
2794         return (done ? 0 : 1);
2795 }
2796
2797 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2798 {
2799         struct tg3_hw_status *sblk = tp->hw_status;
2800         unsigned int work_exists = 0;
2801
2802         /* check for phy events */
2803         if (!(tp->tg3_flags &
2804               (TG3_FLAG_USE_LINKCHG_REG |
2805                TG3_FLAG_POLL_SERDES))) {
2806                 if (sblk->status & SD_STATUS_LINK_CHG)
2807                         work_exists = 1;
2808         }
2809         /* check for RX/TX work to do */
2810         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2811             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2812                 work_exists = 1;
2813
2814         return work_exists;
2815 }
2816
2817 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2818 {
2819         struct net_device *dev = dev_id;
2820         struct tg3 *tp = netdev_priv(dev);
2821         struct tg3_hw_status *sblk = tp->hw_status;
2822         unsigned long flags;
2823         unsigned int handled = 1;
2824
2825         spin_lock_irqsave(&tp->lock, flags);
2826
2827         if (sblk->status & SD_STATUS_UPDATED) {
2828                 /*
2829                  * writing any value to intr-mbox-0 clears PCI INTA# and
2830                  * chip-internal interrupt pending events.
2831                  * writing non-zero to intr-mbox-0 additionally tells the
2832                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2833                  * event coalescing.
2834                  */
2835                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2836                              0x00000001);
2837                 /*
2838                  * Flush PCI write.  This also guarantees that our
2839                  * status block has been flushed to host memory.
2840                  */
2841                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2842                 sblk->status &= ~SD_STATUS_UPDATED;
2843
2844                 if (likely(tg3_has_work(dev, tp)))
2845                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2846                 else {
2847                         /* no work, shared interrupt perhaps?  re-enable
2848                          * interrupts, and flush that PCI write
2849                          */
2850                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2851                                 0x00000000);
2852                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2853                 }
2854         } else {        /* shared interrupt */
2855                 handled = 0;
2856         }
2857
2858         spin_unlock_irqrestore(&tp->lock, flags);
2859
2860         return IRQ_RETVAL(handled);
2861 }
2862
2863 static int tg3_init_hw(struct tg3 *);
2864 static int tg3_halt(struct tg3 *);
2865
2866 #ifdef CONFIG_NET_POLL_CONTROLLER
2867 static void tg3_poll_controller(struct net_device *dev)
2868 {
2869         tg3_interrupt(dev->irq, dev, NULL);
2870 }
2871 #endif
2872
2873 static void tg3_reset_task(void *_data)
2874 {
2875         struct tg3 *tp = _data;
2876         unsigned int restart_timer;
2877
2878         tg3_netif_stop(tp);
2879
2880         spin_lock_irq(&tp->lock);
2881         spin_lock(&tp->tx_lock);
2882
2883         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2884         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2885
2886         tg3_halt(tp);
2887         tg3_init_hw(tp);
2888
2889         tg3_netif_start(tp);
2890
2891         spin_unlock(&tp->tx_lock);
2892         spin_unlock_irq(&tp->lock);
2893
2894         if (restart_timer)
2895                 mod_timer(&tp->timer, jiffies + 1);
2896 }
2897
2898 static void tg3_tx_timeout(struct net_device *dev)
2899 {
2900         struct tg3 *tp = netdev_priv(dev);
2901
2902         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2903                dev->name);
2904
2905         schedule_work(&tp->reset_task);
2906 }
2907
2908 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2909
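/* Workaround path for the 4 GB DMA boundary bug: copy the offending skb
 * into a freshly allocated linear skb, map and describe that copy with a
 * single TX descriptor, then unmap the original mappings and fix up the
 * software ring entries they occupied.  Returns -1 (dropping the packet)
 * if the copy cannot be allocated, 0 on success.
 */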
2910 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2911                                        u32 guilty_entry, int guilty_len,
2912                                        u32 last_plus_one, u32 *start, u32 mss)
2913 {
2914         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2915         dma_addr_t new_addr;
2916         u32 entry = *start;
2917         int i;
2918
2919         if (!new_skb) {
2920                 dev_kfree_skb(skb);
2921                 return -1;
2922         }
2923
2924         /* New SKB is guaranteed to be linear. */
2925         entry = *start;
2926         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2927                                   PCI_DMA_TODEVICE);
2928         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2929                     (skb->ip_summed == CHECKSUM_HW) ?
2930                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2931         *start = NEXT_TX(entry);
2932
2933         /* Now clean up the sw ring entries. */
2934         i = 0;
2935         while (entry != last_plus_one) {
2936                 int len;
2937
2938                 if (i == 0)
2939                         len = skb_headlen(skb);
2940                 else
2941                         len = skb_shinfo(skb)->frags[i-1].size;
2942                 pci_unmap_single(tp->pdev,
2943                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2944                                  len, PCI_DMA_TODEVICE);
2945                 if (i == 0) {
2946                         tp->tx_buffers[entry].skb = new_skb;
2947                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2948                 } else {
2949                         tp->tx_buffers[entry].skb = NULL;
2950                 }
2951                 entry = NEXT_TX(entry);
2952         }
2953
2954         dev_kfree_skb(skb);
2955
2956         return 0;
2957 }
2958
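/* Fill in one hardware TX descriptor.  The mss_and_is_end argument packs
 * the TSO MSS into the upper bits with the "last descriptor" flag in bit 0,
 * so callers pass values of the form (is_last | (mss << 1)); a VLAN tag,
 * when present, rides in the upper 16 bits of flags.
 */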
2959 static void tg3_set_txd(struct tg3 *tp, int entry,
2960                         dma_addr_t mapping, int len, u32 flags,
2961                         u32 mss_and_is_end)
2962 {
2963         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2964         int is_end = (mss_and_is_end & 0x1);
2965         u32 mss = (mss_and_is_end >> 1);
2966         u32 vlan_tag = 0;
2967
2968         if (is_end)
2969                 flags |= TXD_FLAG_END;
2970         if (flags & TXD_FLAG_VLAN) {
2971                 vlan_tag = flags >> 16;
2972                 flags &= 0xffff;
2973         }
2974         vlan_tag |= (mss << TXD_MSS_SHIFT);
2975
2976         txd->addr_hi = ((u64) mapping >> 32);
2977         txd->addr_lo = ((u64) mapping & 0xffffffff);
2978         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2979         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2980 }
2981
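/* Test for the 4 GB DMA boundary hardware bug: returns nonzero when the
 * low 32 bits of [mapping, mapping + len] (plus a few bytes of slack)
 * would wrap past a 4 GB boundary, in which case the buffer has to be
 * routed through tigon3_4gb_hwbug_workaround().
 */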
2982 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2983 {
2984         u32 base = (u32) mapping & 0xffffffff;
2985
2986         return ((base > 0xffffdcc0) &&
2987                 (base + len + 8 < base));
2988 }
2989
2990 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2991 {
2992         struct tg3 *tp = netdev_priv(dev);
2993         dma_addr_t mapping;
2994         unsigned int i;
2995         u32 len, entry, base_flags, mss;
2996         int would_hit_hwbug;
2997         unsigned long flags;
2998
2999         len = skb_headlen(skb);
3000
3001         /* No BH disabling for tx_lock here.  We are running in BH disabled
3002          * context and TX reclaim runs via tp->poll inside of a software
3003          * interrupt.  Rejoice!
3004          *
3005          * Actually, things are not so simple.  If we are to take a hw
3006          * IRQ here, we can deadlock, consider:
3007          *
3008          *       CPU1           CPU2
3009          *   tg3_start_xmit
3010          *   take tp->tx_lock
3011          *                      tg3_timer
3012          *                      take tp->lock
3013          *   tg3_interrupt
3014          *   spin on tp->lock
3015          *                      spin on tp->tx_lock
3016          *
3017          * So we really do need to disable interrupts when taking
3018          * tx_lock here.
3019          */
3020         local_irq_save(flags);
3021         if (!spin_trylock(&tp->tx_lock)) { 
3022                 local_irq_restore(flags);
3023                 return NETDEV_TX_LOCKED; 
3024         } 
3025
3026         /* This is a hard error, log it. */
3027         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3028                 netif_stop_queue(dev);
3029                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3030                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3031                        dev->name);
3032                 return NETDEV_TX_BUSY;
3033         }
3034
3035         entry = tp->tx_prod;
3036         base_flags = 0;
3037         if (skb->ip_summed == CHECKSUM_HW)
3038                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3039 #if TG3_TSO_SUPPORT != 0
3040         mss = 0;
3041         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3042             (mss = skb_shinfo(skb)->tso_size) != 0) {
3043                 int tcp_opt_len, ip_tcp_len;
3044
3045                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3046                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3047
3048                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3049                                TXD_FLAG_CPU_POST_DMA);
3050
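                /* Standard hardware-TSO preparation: preset the per-segment
                 * IP total length and seed the TCP checksum with the
                 * pseudo-header sum (length left out) so the chip can fill
                 * in the checksum of each segment it generates.
                 */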
3051                 skb->nh.iph->check = 0;
3052                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3053                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
3054                                                       skb->nh.iph->daddr,
3055                                                       0, IPPROTO_TCP, 0);
3056
3057                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3058                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3059                                 int tsflags;
3060
3061                                 tsflags = ((skb->nh.iph->ihl - 5) +
3062                                            (tcp_opt_len >> 2));
3063                                 mss |= (tsflags << 11);
3064                         }
3065                 } else {
3066                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3067                                 int tsflags;
3068
3069                                 tsflags = ((skb->nh.iph->ihl - 5) +
3070                                            (tcp_opt_len >> 2));
3071                                 base_flags |= tsflags << 12;
3072                         }
3073                 }
3074         }
3075 #else
3076         mss = 0;
3077 #endif
3078 #if TG3_VLAN_TAG_USED
3079         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3080                 base_flags |= (TXD_FLAG_VLAN |
3081                                (vlan_tx_tag_get(skb) << 16));
3082 #endif
3083
3084         /* Queue skb data, a.k.a. the main skb fragment. */
3085         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3086
3087         tp->tx_buffers[entry].skb = skb;
3088         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3089
3090         would_hit_hwbug = 0;
3091
3092         if (tg3_4g_overflow_test(mapping, len))
3093                 would_hit_hwbug = entry + 1;
3094
3095         tg3_set_txd(tp, entry, mapping, len, base_flags,
3096                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3097
3098         entry = NEXT_TX(entry);
3099
3100         /* Now loop through additional data fragments, and queue them. */
3101         if (skb_shinfo(skb)->nr_frags > 0) {
3102                 unsigned int i, last;
3103
3104                 last = skb_shinfo(skb)->nr_frags - 1;
3105                 for (i = 0; i <= last; i++) {
3106                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3107
3108                         len = frag->size;
3109                         mapping = pci_map_page(tp->pdev,
3110                                                frag->page,
3111                                                frag->page_offset,
3112                                                len, PCI_DMA_TODEVICE);
3113
3114                         tp->tx_buffers[entry].skb = NULL;
3115                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3116
3117                         if (tg3_4g_overflow_test(mapping, len)) {
3118                                 /* Only one should match. */
3119                                 if (would_hit_hwbug)
3120                                         BUG();
3121                                 would_hit_hwbug = entry + 1;
3122                         }
3123
3124                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3125                                 tg3_set_txd(tp, entry, mapping, len,
3126                                             base_flags, (i == last)|(mss << 1));
3127                         else
3128                                 tg3_set_txd(tp, entry, mapping, len,
3129                                             base_flags, (i == last));
3130
3131                         entry = NEXT_TX(entry);
3132                 }
3133         }
3134
3135         if (would_hit_hwbug) {
3136                 u32 last_plus_one = entry;
3137                 u32 start;
3138                 unsigned int len = 0;
3139
3140                 would_hit_hwbug -= 1;
3141                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3142                 entry &= (TG3_TX_RING_SIZE - 1);
3143                 start = entry;
3144                 i = 0;
3145                 while (entry != last_plus_one) {
3146                         if (i == 0)
3147                                 len = skb_headlen(skb);
3148                         else
3149                                 len = skb_shinfo(skb)->frags[i-1].size;
3150
3151                         if (entry == would_hit_hwbug)
3152                                 break;
3153
3154                         i++;
3155                         entry = NEXT_TX(entry);
3156
3157                 }
3158
3159                 /* If the workaround fails due to memory/mapping
3160                  * failure, silently drop this packet.
3161                  */
3162                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3163                                                 entry, len,
3164                                                 last_plus_one,
3165                                                 &start, mss))
3166                         goto out_unlock;
3167
3168                 entry = start;
3169         }
3170
3171         /* Packets are ready, update the Tx producer index locally and on the card. */
3172         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3173
3174         tp->tx_prod = entry;
3175         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3176                 netif_stop_queue(dev);
3177
3178 out_unlock:
3179         spin_unlock_irqrestore(&tp->tx_lock, flags);
3180
3181         dev->trans_start = jiffies;
3182
3183         return NETDEV_TX_OK;
3184 }
3185
3186 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3187                                int new_mtu)
3188 {
3189         dev->mtu = new_mtu;
3190
3191         if (new_mtu > ETH_DATA_LEN)
3192                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3193         else
3194                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3195 }
3196
3197 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3198 {
3199         struct tg3 *tp = netdev_priv(dev);
3200
3201         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3202                 return -EINVAL;
3203
3204         if (!netif_running(dev)) {
3205                 /* We'll just catch it later when the
3206                  * device is brought up.
3207                  */
3208                 tg3_set_mtu(dev, tp, new_mtu);
3209                 return 0;
3210         }
3211
3212         tg3_netif_stop(tp);
3213         spin_lock_irq(&tp->lock);
3214         spin_lock(&tp->tx_lock);
3215
3216         tg3_halt(tp);
3217
3218         tg3_set_mtu(dev, tp, new_mtu);
3219
3220         tg3_init_hw(tp);
3221
3222         tg3_netif_start(tp);
3223
3224         spin_unlock(&tp->tx_lock);
3225         spin_unlock_irq(&tp->lock);
3226
3227         return 0;
3228 }
3229
3230 /* Free up pending packets in all rx/tx rings.
3231  *
3232  * The chip has been shut down and the driver detached from
3233  * the networking stack, so no interrupts or new tx packets will
3234  * end up in the driver.  tp->{tx,}lock is not held and we are not
3235  * in an interrupt context and thus may sleep.
3236  */
3237 static void tg3_free_rings(struct tg3 *tp)
3238 {
3239         struct ring_info *rxp;
3240         int i;
3241
3242         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3243                 rxp = &tp->rx_std_buffers[i];
3244
3245                 if (rxp->skb == NULL)
3246                         continue;
3247                 pci_unmap_single(tp->pdev,
3248                                  pci_unmap_addr(rxp, mapping),
3249                                  RX_PKT_BUF_SZ - tp->rx_offset,
3250                                  PCI_DMA_FROMDEVICE);
3251                 dev_kfree_skb_any(rxp->skb);
3252                 rxp->skb = NULL;
3253         }
3254
3255         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3256                 rxp = &tp->rx_jumbo_buffers[i];
3257
3258                 if (rxp->skb == NULL)
3259                         continue;
3260                 pci_unmap_single(tp->pdev,
3261                                  pci_unmap_addr(rxp, mapping),
3262                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3263                                  PCI_DMA_FROMDEVICE);
3264                 dev_kfree_skb_any(rxp->skb);
3265                 rxp->skb = NULL;
3266         }
3267
3268         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3269                 struct tx_ring_info *txp;
3270                 struct sk_buff *skb;
3271                 int j;
3272
3273                 txp = &tp->tx_buffers[i];
3274                 skb = txp->skb;
3275
3276                 if (skb == NULL) {
3277                         i++;
3278                         continue;
3279                 }
3280
3281                 pci_unmap_single(tp->pdev,
3282                                  pci_unmap_addr(txp, mapping),
3283                                  skb_headlen(skb),
3284                                  PCI_DMA_TODEVICE);
3285                 txp->skb = NULL;
3286
3287                 i++;
3288
3289                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3290                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3291                         pci_unmap_page(tp->pdev,
3292                                        pci_unmap_addr(txp, mapping),
3293                                        skb_shinfo(skb)->frags[j].size,
3294                                        PCI_DMA_TODEVICE);
3295                         i++;
3296                 }
3297
3298                 dev_kfree_skb_any(skb);
3299         }
3300 }
3301
3302 /* Initialize tx/rx rings for packet processing.
3303  *
3304  * The chip has been shut down and the driver detached from
3305  * the networking stack, so no interrupts or new tx packets will
3306  * end up in the driver.  tp->{tx,}lock are held and thus
3307  * we may not sleep.
3308  */
3309 static void tg3_init_rings(struct tg3 *tp)
3310 {
3311         u32 i;
3312
3313         /* Free up all the SKBs. */
3314         tg3_free_rings(tp);
3315
3316         /* Zero out all descriptors. */
3317         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3318         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3319         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3320         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3321
3322         /* Initialize invariants of the rings; we only set this
3323          * stuff once.  This works because the card does not
3324          * write into the rx buffer posting rings.
3325          */
3326         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3327                 struct tg3_rx_buffer_desc *rxd;
3328
3329                 rxd = &tp->rx_std[i];
3330                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3331                         << RXD_LEN_SHIFT;
3332                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3333                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3334                                (i << RXD_OPAQUE_INDEX_SHIFT));
3335         }
3336
3337         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3338                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3339                         struct tg3_rx_buffer_desc *rxd;
3340
3341                         rxd = &tp->rx_jumbo[i];
3342                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3343                                 << RXD_LEN_SHIFT;
3344                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3345                                 RXD_FLAG_JUMBO;
3346                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3347                                (i << RXD_OPAQUE_INDEX_SHIFT));
3348                 }
3349         }
3350
3351         /* Now allocate fresh SKBs for each rx ring. */
3352         for (i = 0; i < tp->rx_pending; i++) {
3353                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3354                                      -1, i) < 0)
3355                         break;
3356         }
3357
3358         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3359                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3360                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3361                                              -1, i) < 0)
3362                                 break;
3363                 }
3364         }
3365 }
3366
3367 /*
3368  * Must not be invoked with interrupt sources disabled and
3369  * the hardware shut down.
3370  */
3371 static void tg3_free_consistent(struct tg3 *tp)
3372 {
3373         if (tp->rx_std_buffers) {
3374                 kfree(tp->rx_std_buffers);
3375                 tp->rx_std_buffers = NULL;
3376         }
3377         if (tp->rx_std) {
3378                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3379                                     tp->rx_std, tp->rx_std_mapping);
3380                 tp->rx_std = NULL;
3381         }
3382         if (tp->rx_jumbo) {
3383                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3384                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3385                 tp->rx_jumbo = NULL;
3386         }
3387         if (tp->rx_rcb) {
3388                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3389                                     tp->rx_rcb, tp->rx_rcb_mapping);
3390                 tp->rx_rcb = NULL;
3391         }
3392         if (tp->tx_ring) {
3393                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3394                         tp->tx_ring, tp->tx_desc_mapping);
3395                 tp->tx_ring = NULL;
3396         }
3397         if (tp->hw_status) {
3398                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3399                                     tp->hw_status, tp->status_mapping);
3400                 tp->hw_status = NULL;
3401         }
3402         if (tp->hw_stats) {
3403                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3404                                     tp->hw_stats, tp->stats_mapping);
3405                 tp->hw_stats = NULL;
3406         }
3407 }
3408
3409 /*
3410  * Must not be invoked with interrupt sources disabled and
3411  * the hardware shut down.  Can sleep.
3412  */
3413 static int tg3_alloc_consistent(struct tg3 *tp)
3414 {
3415         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3416                                       (TG3_RX_RING_SIZE +
3417                                        TG3_RX_JUMBO_RING_SIZE)) +
3418                                      (sizeof(struct tx_ring_info) *
3419                                       TG3_TX_RING_SIZE),
3420                                      GFP_KERNEL);
3421         if (!tp->rx_std_buffers)
3422                 return -ENOMEM;
3423
3424         memset(tp->rx_std_buffers, 0,
3425                (sizeof(struct ring_info) *
3426                 (TG3_RX_RING_SIZE +
3427                  TG3_RX_JUMBO_RING_SIZE)) +
3428                (sizeof(struct tx_ring_info) *
3429                 TG3_TX_RING_SIZE));
3430
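        /* Carve the single kmalloc'ed shadow area into its three parts:
         * standard RX ring info, jumbo RX ring info, then TX ring info.
         */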
3431         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3432         tp->tx_buffers = (struct tx_ring_info *)
3433                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3434
3435         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3436                                           &tp->rx_std_mapping);
3437         if (!tp->rx_std)
3438                 goto err_out;
3439
3440         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3441                                             &tp->rx_jumbo_mapping);
3442
3443         if (!tp->rx_jumbo)
3444                 goto err_out;
3445
3446         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3447                                           &tp->rx_rcb_mapping);
3448         if (!tp->rx_rcb)
3449                 goto err_out;
3450
3451         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3452                                            &tp->tx_desc_mapping);
3453         if (!tp->tx_ring)
3454                 goto err_out;
3455
3456         tp->hw_status = pci_alloc_consistent(tp->pdev,
3457                                              TG3_HW_STATUS_SIZE,
3458                                              &tp->status_mapping);
3459         if (!tp->hw_status)
3460                 goto err_out;
3461
3462         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3463                                             sizeof(struct tg3_hw_stats),
3464                                             &tp->stats_mapping);
3465         if (!tp->hw_stats)
3466                 goto err_out;
3467
3468         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3469         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3470
3471         return 0;
3472
3473 err_out:
3474         tg3_free_consistent(tp);
3475         return -ENOMEM;
3476 }
3477
3478 #define MAX_WAIT_CNT 1000
3479
3480 /* To stop a block, clear the enable bit and poll till it
3481  * clears.  tp->lock is held.
3482  */
3483 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3484 {
3485         unsigned int i;
3486         u32 val;
3487
3488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3489             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3490                 switch (ofs) {
3491                 case RCVLSC_MODE:
3492                 case DMAC_MODE:
3493                 case MBFREE_MODE:
3494                 case BUFMGR_MODE:
3495                 case MEMARB_MODE:
3496                         /* We can't enable/disable these bits on the
3497                          * 5705/5750, so just report success.
3498                          */
3499                         return 0;
3500
3501                 default:
3502                         break;
3503                 };
3504         }
3505
3506         val = tr32(ofs);
3507         val &= ~enable_bit;
3508         tw32_f(ofs, val);
3509
3510         for (i = 0; i < MAX_WAIT_CNT; i++) {
3511                 udelay(100);
3512                 val = tr32(ofs);
3513                 if ((val & enable_bit) == 0)
3514                         break;
3515         }
3516
3517         if (i == MAX_WAIT_CNT) {
3518                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3519                        "ofs=%lx enable_bit=%x\n",
3520                        ofs, enable_bit);
3521                 return -ENODEV;
3522         }
3523
3524         return 0;
3525 }
3526
3527 /* tp->lock is held. */
3528 static int tg3_abort_hw(struct tg3 *tp)
3529 {
3530         int i, err;
3531
3532         tg3_disable_ints(tp);
3533
3534         tp->rx_mode &= ~RX_MODE_ENABLE;
3535         tw32_f(MAC_RX_MODE, tp->rx_mode);
3536         udelay(10);
3537
3538         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3539         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3540         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3541         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3542         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3543         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3544
3545         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3546         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3547         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3548         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3549         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3550         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3551         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3552         if (err)
3553                 goto out;
3554
3555         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3556         tw32_f(MAC_MODE, tp->mac_mode);
3557         udelay(40);
3558
3559         tp->tx_mode &= ~TX_MODE_ENABLE;
3560         tw32_f(MAC_TX_MODE, tp->tx_mode);
3561
3562         for (i = 0; i < MAX_WAIT_CNT; i++) {
3563                 udelay(100);
3564                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3565                         break;
3566         }
3567         if (i >= MAX_WAIT_CNT) {
3568                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3569                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3570                        tp->dev->name, tr32(MAC_TX_MODE));
3571                 return -ENODEV;
3572         }
3573
3574         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3575         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3576         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3577
3578         tw32(FTQ_RESET, 0xffffffff);
3579         tw32(FTQ_RESET, 0x00000000);
3580
3581         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3582         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3583         if (err)
3584                 goto out;
3585
3586         if (tp->hw_status)
3587                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3588         if (tp->hw_stats)
3589                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3590
3591 out:
3592         return err;
3593 }
3594
3595 /* tp->lock is held. */
3596 static int tg3_nvram_lock(struct tg3 *tp)
3597 {
3598         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3599                 int i;
3600
3601                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3602                 for (i = 0; i < 8000; i++) {
3603                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3604                                 break;
3605                         udelay(20);
3606                 }
3607                 if (i == 8000)
3608                         return -ENODEV;
3609         }
3610         return 0;
3611 }
3612
3613 /* tp->lock is held. */
3614 static void tg3_nvram_unlock(struct tg3 *tp)
3615 {
3616         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3617                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3618 }
3619
3620 /* tp->lock is held. */
3621 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3622 {
3623         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3624                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3625
3626         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3627                 switch (kind) {
3628                 case RESET_KIND_INIT:
3629                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3630                                       DRV_STATE_START);
3631                         break;
3632
3633                 case RESET_KIND_SHUTDOWN:
3634                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3635                                       DRV_STATE_UNLOAD);
3636                         break;
3637
3638                 case RESET_KIND_SUSPEND:
3639                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3640                                       DRV_STATE_SUSPEND);
3641                         break;
3642
3643                 default:
3644                         break;
3645                 };
3646         }
3647 }
3648
3649 /* tp->lock is held. */
3650 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3651 {
3652         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3653                 switch (kind) {
3654                 case RESET_KIND_INIT:
3655                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3656                                       DRV_STATE_START_DONE);
3657                         break;
3658
3659                 case RESET_KIND_SHUTDOWN:
3660                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3661                                       DRV_STATE_UNLOAD_DONE);
3662                         break;
3663
3664                 default:
3665                         break;
3666                 };
3667         }
3668 }
3669
3670 /* tp->lock is held. */
3671 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3672 {
3673         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3674                 switch (kind) {
3675                 case RESET_KIND_INIT:
3676                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3677                                       DRV_STATE_START);
3678                         break;
3679
3680                 case RESET_KIND_SHUTDOWN:
3681                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3682                                       DRV_STATE_UNLOAD);
3683                         break;
3684
3685                 case RESET_KIND_SUSPEND:
3686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3687                                       DRV_STATE_SUSPEND);
3688                         break;
3689
3690                 default:
3691                         break;
3692                 };
3693         }
3694 }
3695
3696 static void tg3_stop_fw(struct tg3 *);
3697
3698 /* tp->lock is held. */
3699 static int tg3_chip_reset(struct tg3 *tp)
3700 {
3701         u32 val;
3702         u32 flags_save;
3703         int i;
3704
3705         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3706                 tg3_nvram_lock(tp);
3707
3708         /*
3709          * We must avoid the readl() that normally takes place.
3710          * It locks up machines, causes machine checks, and other
3711          * fun things.  So, temporarily disable the 5701
3712          * hardware workaround while we do the reset.
3713          */
3714         flags_save = tp->tg3_flags;
3715         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3716
3717         /* do the reset */
3718         val = GRC_MISC_CFG_CORECLK_RESET;
3719
3720         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3721                 if (tr32(0x7e2c) == 0x60) {
3722                         tw32(0x7e2c, 0x20);
3723                 }
3724                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3725                         tw32(GRC_MISC_CFG, (1 << 29));
3726                         val |= (1 << 29);
3727                 }
3728         }
3729
3730         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3731             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3732                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3733         tw32(GRC_MISC_CFG, val);
3734
3735         /* restore 5701 hardware bug workaround flag */
3736         tp->tg3_flags = flags_save;
3737
3738         /* Unfortunately, we have to delay before the PCI read back.
3739          * Some 575X chips will not even respond to a PCI cfg access
3740          * when the reset command is given to the chip.
3741          *
3742          * How do these hardware designers expect things to work
3743          * properly if the PCI write is posted for a long period
3744          * of time?  It is always necessary to have some method by
3745          * which a register read back can occur to push out the
3746          * write that does the reset.
3747          *
3748          * For most tg3 variants the trick below was working.
3749          * Ho hum...
3750          */
3751         udelay(120);
3752
3753         /* Flush PCI posted writes.  The normal MMIO registers
3754          * are inaccessible at this time so this is the only
3755          * way to do this reliably (actually, this is no longer
3756          * the case, see above).  I tried to use indirect
3757          * register read/write but this upset some 5701 variants.
3758          */
3759         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3760
3761         udelay(120);
3762
3763         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3764                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3765                         int i;
3766                         u32 cfg_val;
3767
3768                         /* Wait for link training to complete.  */
3769                         for (i = 0; i < 5000; i++)
3770                                 udelay(100);
3771
3772                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3773                         pci_write_config_dword(tp->pdev, 0xc4,
3774                                                cfg_val | (1 << 15));
3775                 }
3776                 /* Set PCIE max payload size and clear error status.  */
3777                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3778         }
3779
3780         /* Re-enable indirect register accesses. */
3781         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3782                                tp->misc_host_ctrl);
3783
3784         /* Set MAX PCI retry to zero. */
3785         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3786         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3787             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3788                 val |= PCISTATE_RETRY_SAME_DMA;
3789         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3790
3791         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3792
3793         /* Make sure PCI-X relaxed ordering bit is clear. */
3794         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3795         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3796         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3797
3798         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3799
3800         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3801                 tg3_stop_fw(tp);
3802                 tw32(0x5000, 0x400);
3803         }
3804
3805         tw32(GRC_MODE, tp->grc_mode);
3806
3807         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3808                 u32 val = tr32(0xc4);
3809
3810                 tw32(0xc4, val | (1 << 15));
3811         }
3812
3813         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3815                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3816                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3817                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3818                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3819         }
3820
3821         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3822                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3823                 tw32_f(MAC_MODE, tp->mac_mode);
3824         } else
3825                 tw32_f(MAC_MODE, 0);
3826         udelay(40);
3827
3828         /* Wait for firmware initialization to complete. */
3829         for (i = 0; i < 100000; i++) {
3830                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3831                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3832                         break;
3833                 udelay(10);
3834         }
3835         if (i >= 100000 &&
3836             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3837                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3838                        "firmware will not restart magic=%08x\n",
3839                        tp->dev->name, val);
3840                 return -ENODEV;
3841         }
3842
3843         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3844             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3845                 u32 val = tr32(0x7c00);
3846
3847                 tw32(0x7c00, val | (1 << 25));
3848         }
3849
3850         /* Reprobe ASF enable state.  */
3851         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3852         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3853         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3854         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3855                 u32 nic_cfg;
3856
3857                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3858                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3859                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3860                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3861                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3862                 }
3863         }
3864
3865         return 0;
3866 }
3867
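/* If ASF firmware is active, ask it to pause: post FWCMD_NICDRV_PAUSE_FW
 * in the driver->firmware mailbox, raise the RX CPU event bit, and give
 * the firmware a short while to acknowledge by clearing that bit.
 */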
3868 /* tp->lock is held. */
3869 static void tg3_stop_fw(struct tg3 *tp)
3870 {
3871         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3872                 u32 val;
3873                 int i;
3874
3875                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3876                 val = tr32(GRC_RX_CPU_EVENT);
3877                 val |= (1 << 14);
3878                 tw32(GRC_RX_CPU_EVENT, val);
3879
3880                 /* Wait for RX cpu to ACK the event.  */
3881                 for (i = 0; i < 100; i++) {
3882                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3883                                 break;
3884                         udelay(1);
3885                 }
3886         }
3887 }
3888
3889 /* tp->lock is held. */
3890 static int tg3_halt(struct tg3 *tp)
3891 {
3892         int err;
3893
3894         tg3_stop_fw(tp);
3895
3896         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3897
3898         tg3_abort_hw(tp);
3899         err = tg3_chip_reset(tp);
3900
3901         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3902         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3903
3904         if (err)
3905                 return err;
3906
3907         return 0;
3908 }
3909
3910 #define TG3_FW_RELEASE_MAJOR    0x0
3911 #define TG3_FW_RELASE_MINOR     0x0
3912 #define TG3_FW_RELEASE_FIX      0x0
3913 #define TG3_FW_START_ADDR       0x08000000
3914 #define TG3_FW_TEXT_ADDR        0x08000000
3915 #define TG3_FW_TEXT_LEN         0x9c0
3916 #define TG3_FW_RODATA_ADDR      0x080009c0
3917 #define TG3_FW_RODATA_LEN       0x60
3918 #define TG3_FW_DATA_ADDR        0x08000a40
3919 #define TG3_FW_DATA_LEN         0x20
3920 #define TG3_FW_SBSS_ADDR        0x08000a60
3921 #define TG3_FW_SBSS_LEN         0xc
3922 #define TG3_FW_BSS_ADDR         0x08000a70
3923 #define TG3_FW_BSS_LEN          0x10
3924
3925 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3926         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3927         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3928         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3929         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3930         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3931         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3932         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3933         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3934         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3935         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3936         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3937         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3938         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3939         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3940         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3941         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3942         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3943         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3944         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3945         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3946         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3947         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3948         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3949         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3950         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3951         0, 0, 0, 0, 0, 0,
3952         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3953         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3954         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3955         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3956         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3957         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3958         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3959         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3960         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3961         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3962         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3963         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3964         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3965         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3966         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3967         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3968         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3969         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3970         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3971         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3972         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3973         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3974         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3975         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3976         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3977         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3978         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3979         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3980         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3981         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3982         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3983         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3984         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3985         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3986         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3987         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3988         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3989         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3990         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3991         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3992         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3993         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3994         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3995         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3996         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3997         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3998         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3999         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4000         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4001         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4002         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4003         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4004         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4005         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4006         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4007         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4008         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4009         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4010         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4011         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4012         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4013         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4014         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4015         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4016         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4017 };
4018
4019 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4020         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4021         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4022         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4023         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4024         0x00000000
4025 };
4026
4027 #if 0 /* All zeros, don't eat up space with it. */
4028 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4029         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4030         0x00000000, 0x00000000, 0x00000000, 0x00000000
4031 };
4032 #endif
4033
4034 #define RX_CPU_SCRATCH_BASE     0x30000
4035 #define RX_CPU_SCRATCH_SIZE     0x04000
4036 #define TX_CPU_SCRATCH_BASE     0x34000
4037 #define TX_CPU_SCRATCH_SIZE     0x04000
4038
4039 /* tp->lock is held. */
4040 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4041 {
4042         int i;
4043
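        /* The 5705 family has no separate TX CPU, so asking to halt
         * TX_CPU_BASE on one is a driver bug.
         */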
4044         if (offset == TX_CPU_BASE &&
4045             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4046                 BUG();
4047
4048         if (offset == RX_CPU_BASE) {
4049                 for (i = 0; i < 10000; i++) {
4050                         tw32(offset + CPU_STATE, 0xffffffff);
4051                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4052                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4053                                 break;
4054                 }
4055
4056                 tw32(offset + CPU_STATE, 0xffffffff);
4057                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4058                 udelay(10);
4059         } else {
4060                 for (i = 0; i < 10000; i++) {
4061                         tw32(offset + CPU_STATE, 0xffffffff);
4062                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4063                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4064                                 break;
4065                 }
4066         }
4067
4068         if (i >= 10000) {
4069                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4070                        "and %s CPU\n",
4071                        tp->dev->name,
4072                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4073                 return -ENODEV;
4074         }
4075         return 0;
4076 }
4077
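/* Describes one firmware image to be loaded into a CPU's scratch memory:
 * base address, length and payload of its text, rodata and data sections.
 * A NULL *_data pointer means that section is all zeros.
 */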
4078 struct fw_info {
4079         unsigned int text_base;
4080         unsigned int text_len;
4081         u32 *text_data;
4082         unsigned int rodata_base;
4083         unsigned int rodata_len;
4084         u32 *rodata_data;
4085         unsigned int data_base;
4086         unsigned int data_len;
4087         u32 *data_data;
4088 };
4089
4090 /* tp->lock is held. */
4091 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4092                                  int cpu_scratch_size, struct fw_info *info)
4093 {
4094         int err, i;
4095         u32 orig_tg3_flags = tp->tg3_flags;
4096         void (*write_op)(struct tg3 *, u32, u32);
4097
4098         if (cpu_base == TX_CPU_BASE &&
4099             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4100                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4101                        "TX cpu firmware on %s which is 5705.\n",
4102                        tp->dev->name);
4103                 return -EINVAL;
4104         }
4105
4106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4107                 write_op = tg3_write_mem;
4108         else
4109                 write_op = tg3_write_indirect_reg32;
4110
4111         /* Force use of PCI config space for indirect register
4112          * write calls.
4113          */
4114         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4115
4116         err = tg3_halt_cpu(tp, cpu_base);
4117         if (err)
4118                 goto out;
4119
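        /* Zero the whole scratch area, then copy the text, rodata and data
         * sections in at their link addresses (offsets within the scratch
         * region), substituting zeros for any section without payload.
         */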
4120         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4121                 write_op(tp, cpu_scratch_base + i, 0);
4122         tw32(cpu_base + CPU_STATE, 0xffffffff);
4123         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
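        /* Copy each firmware section into the CPU scratch memory; the low
         * 16 bits of a section's link address give its offset within the
         * scratch area.
         */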
4124         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4125                 write_op(tp, (cpu_scratch_base +
4126                               (info->text_base & 0xffff) +
4127                               (i * sizeof(u32))),
4128                          (info->text_data ?
4129                           info->text_data[i] : 0));
4130         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4131                 write_op(tp, (cpu_scratch_base +
4132                               (info->rodata_base & 0xffff) +
4133                               (i * sizeof(u32))),
4134                          (info->rodata_data ?
4135                           info->rodata_data[i] : 0));
4136         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4137                 write_op(tp, (cpu_scratch_base +
4138                               (info->data_base & 0xffff) +
4139                               (i * sizeof(u32))),
4140                          (info->data_data ?
4141                           info->data_data[i] : 0));
4142
4143         err = 0;
4144
4145 out:
4146         tp->tg3_flags = orig_tg3_flags;
4147         return err;
4148 }
4149
4150 /* tp->lock is held. */
4151 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4152 {
4153         struct fw_info info;
4154         int err, i;
4155
4156         info.text_base = TG3_FW_TEXT_ADDR;
4157         info.text_len = TG3_FW_TEXT_LEN;
4158         info.text_data = &tg3FwText[0];
4159         info.rodata_base = TG3_FW_RODATA_ADDR;
4160         info.rodata_len = TG3_FW_RODATA_LEN;
4161         info.rodata_data = &tg3FwRodata[0];
4162         info.data_base = TG3_FW_DATA_ADDR;
4163         info.data_len = TG3_FW_DATA_LEN;
4164         info.data_data = NULL;
4165
4166         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4167                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4168                                     &info);
4169         if (err)
4170                 return err;
4171
4172         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4173                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4174                                     &info);
4175         if (err)
4176                 return err;
4177
4178         /* Now start up only the RX CPU. */
4179         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4180         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4181
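        /* Confirm the RX CPU's PC latched the firmware entry point,
         * retrying the halt/set-PC sequence a few times if it did not.
         */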
4182         for (i = 0; i < 5; i++) {
4183                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4184                         break;
4185                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4186                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4187                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4188                 udelay(1000);
4189         }
4190         if (i >= 5) {
4191                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s: "
4192                        "could not set RX CPU PC, is %08x, should be %08x\n",
4193                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4194                        TG3_FW_TEXT_ADDR);
4195                 return -ENODEV;
4196         }
4197         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4198         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4199
4200         return 0;
4201 }
4202
4203 #if TG3_TSO_SUPPORT != 0
4204
4205 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4206 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4207 #define TG3_TSO_FW_RELEASE_FIX          0x0
4208 #define TG3_TSO_FW_START_ADDR           0x08000000
4209 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4210 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4211 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4212 #define TG3_TSO_FW_RODATA_LEN           0x60
4213 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4214 #define TG3_TSO_FW_DATA_LEN             0x30
4215 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4216 #define TG3_TSO_FW_SBSS_LEN             0x2c
4217 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4218 #define TG3_TSO_FW_BSS_LEN              0x894
4219
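/* TSO firmware text section, loaded into CPU scratch memory by
 * tg3_load_tso_firmware().
 */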
4220 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4221         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4222         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4223         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4224         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4225         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4226         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4227         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4228         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4229         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4230         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4231         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4232         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4233         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4234         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4235         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4236         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4237         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4238         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4239         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4240         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4241         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4242         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4243         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4244         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4245         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4246         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4247         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4248         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4249         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4250         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4251         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4252         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4253         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4254         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4255         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4256         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4257         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4258         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4259         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4260         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4261         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4262         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4263         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4264         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4265         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4266         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4267         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4268         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4269         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4270         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4271         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4272         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4273         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4274         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4275         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4276         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4277         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4278         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4279         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4280         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4281         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4282         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4283         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4284         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4285         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4286         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4287         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4288         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4289         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4290         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4291         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4292         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4293         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4294         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4295         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4296         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4297         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4298         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4299         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4300         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4301         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4302         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4303         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4304         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4305         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4306         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4307         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4308         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4309         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4310         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4311         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4312         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4313         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4314         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4315         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4316         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4317         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4318         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4319         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4320         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4321         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4322         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4323         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4324         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4325         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4326         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4327         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4328         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4329         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4330         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4331         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4332         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4333         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4334         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4335         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4336         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4337         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4338         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4339         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4340         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4341         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4342         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4343         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4344         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4345         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4346         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4347         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4348         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4349         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4350         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4351         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4352         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4353         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4354         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4355         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4356         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4357         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4358         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4359         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4360         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4361         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4362         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4363         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4364         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4365         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4366         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4367         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4368         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4369         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4370         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4371         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4372         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4373         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4374         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4375         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4376         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4377         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4378         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4379         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4380         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4381         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4382         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4383         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4384         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4385         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4386         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4387         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4388         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4389         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4390         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4391         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4392         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4393         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4394         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4395         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4396         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4397         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4398         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4399         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4400         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4401         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4402         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4403         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4404         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4405         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4406         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4407         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4408         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4409         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4410         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4411         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4412         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4413         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4414         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4415         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4416         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4417         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4418         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4419         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4420         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4421         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4422         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4423         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4424         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4425         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4426         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4427         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4428         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4429         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4430         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4431         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4432         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4433         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4434         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4435         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4436         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4437         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4438         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4439         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4440         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4441         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4442         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4443         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4444         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4445         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4446         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4447         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4448         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4449         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4450         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4451         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4452         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4453         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4454         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4455         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4456         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4457         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4458         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4459         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4460         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4461         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4462         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4463         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4464         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4465         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4466         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4467         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4468         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4469         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4470         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4471         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4472         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4473         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4474         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4475         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4476         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4477         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4478         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4479         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4480         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4481         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4482         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4483         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4484         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4485         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4486         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4487         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4488         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4489         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4490         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4491         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4492         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4493         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4494         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4495         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4496         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4497         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4498         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4499         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4500         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4501         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4502         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4503         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4504         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4505 };
4506
4507 static u32 tg3TsoFwRodata[(TG3_TSO_FW_RODATA_LEN / 4) + 1] = {
4508         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4509         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4510         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4511         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4512         0x00000000,
4513 };
4514
4515 static u32 tg3TsoFwData[(TG3_TSO_FW_DATA_LEN / 4) + 1] = {
4516         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4517         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4518         0x00000000,
4519 };
4520
4521 /* 5705 needs a special version of the TSO firmware.  */
4522 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4523 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4524 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4525 #define TG3_TSO5_FW_START_ADDR          0x00010000
4526 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4527 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4528 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4529 #define TG3_TSO5_FW_RODATA_LEN          0x50
4530 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4531 #define TG3_TSO5_FW_DATA_LEN            0x20
4532 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4533 #define TG3_TSO5_FW_SBSS_LEN            0x28
4534 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4535 #define TG3_TSO5_FW_BSS_LEN             0x88
4536
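/* Text section of the 5705-specific TSO firmware.  On the 5705 this image
 * is loaded onto the RX CPU, since that chip has no separate TX CPU.
 */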
4537 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4538         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4539         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4540         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4541         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4542         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4543         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4544         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4545         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4546         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4547         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4548         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4549         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4550         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4551         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4552         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4553         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4554         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4555         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4556         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4557         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4558         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4559         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4560         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4561         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4562         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4563         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4564         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4565         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4566         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4567         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4568         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4569         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4570         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4571         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4572         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4573         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4574         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4575         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4576         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4577         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4578         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4579         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4580         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4581         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4582         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4583         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4584         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4585         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4586         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4587         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4588         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4589         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4590         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4591         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4592         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4593         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4594         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4595         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4596         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4597         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4598         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4599         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4600         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4601         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4602         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4603         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4604         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4605         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4606         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4607         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4608         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4609         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4610         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4611         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4612         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4613         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4614         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4615         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4616         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4617         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4618         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4619         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4620         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4621         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4622         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4623         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4624         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4625         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4626         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4627         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4628         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4629         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4630         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4631         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4632         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4633         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4634         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4635         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4636         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4637         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4638         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4639         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4640         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4641         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4642         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4643         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4644         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4645         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4646         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4647         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4648         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4649         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4650         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4651         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4652         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4653         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4654         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4655         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4656         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4657         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4658         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4659         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4660         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4661         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4662         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4663         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4664         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4665         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4666         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4667         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4668         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4669         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4670         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4671         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4672         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4673         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4674         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4675         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4676         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4677         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4678         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4679         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4680         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4681         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4682         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4683         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4684         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4685         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4686         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4687         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4688         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4689         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4690         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4691         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4692         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4693         0x00000000, 0x00000000, 0x00000000,
4694 };
4695
4696 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4697         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4698         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4699         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4700         0x00000000, 0x00000000, 0x00000000,
4701 };
4702
4703 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4704         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4705         0x00000000, 0x00000000, 0x00000000,
4706 };
4707
4708 /* tp->lock is held. */
4709 static int tg3_load_tso_firmware(struct tg3 *tp)
4710 {
4711         struct fw_info info;
4712         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4713         int err, i;
4714
4715         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4716                 return 0;
4717
4718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4719                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4720                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4721                 info.text_data = &tg3Tso5FwText[0];
4722                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4723                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4724                 info.rodata_data = &tg3Tso5FwRodata[0];
4725                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4726                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4727                 info.data_data = &tg3Tso5FwData[0];
4728                 cpu_base = RX_CPU_BASE;
4729                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4730                 cpu_scratch_size = (info.text_len +
4731                                     info.rodata_len +
4732                                     info.data_len +
4733                                     TG3_TSO5_FW_SBSS_LEN +
4734                                     TG3_TSO5_FW_BSS_LEN);
4735         } else {
4736                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4737                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4738                 info.text_data = &tg3TsoFwText[0];
4739                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4740                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4741                 info.rodata_data = &tg3TsoFwRodata[0];
4742                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4743                 info.data_len = TG3_TSO_FW_DATA_LEN;
4744                 info.data_data = &tg3TsoFwData[0];
4745                 cpu_base = TX_CPU_BASE;
4746                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4747                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4748         }
4749
4750         err = tg3_load_firmware_cpu(tp, cpu_base,
4751                                     cpu_scratch_base, cpu_scratch_size,
4752                                     &info);
4753         if (err)
4754                 return err;
4755
4756         /* Now start up the CPU. */
4757         tw32(cpu_base + CPU_STATE, 0xffffffff);
4758         tw32_f(cpu_base + CPU_PC,    info.text_base);
4759
4760         for (i = 0; i < 5; i++) {
4761                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4762                         break;
4763                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4764                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4765                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4766                 udelay(1000);
4767         }
4768         if (i >= 5) {
4769                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s: "
4770                        "could not set CPU PC, is %08x, should be %08x\n",
4771                        tp->dev->name, tr32(cpu_base + CPU_PC),
4772                        info.text_base);
4773                 return -ENODEV;
4774         }
4775         tw32(cpu_base + CPU_STATE, 0xffffffff);
4776         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4777         return 0;
4778 }
4779
4780 #endif /* TG3_TSO_SUPPORT != 0 */
4781
4782 /* tp->lock is held. */
4783 static void __tg3_set_mac_addr(struct tg3 *tp)
4784 {
4785         u32 addr_high, addr_low;
4786         int i;
4787
4788         addr_high = ((tp->dev->dev_addr[0] << 8) |
4789                      tp->dev->dev_addr[1]);
4790         addr_low = ((tp->dev->dev_addr[2] << 24) |
4791                     (tp->dev->dev_addr[3] << 16) |
4792                     (tp->dev->dev_addr[4] <<  8) |
4793                     (tp->dev->dev_addr[5] <<  0));
4794         for (i = 0; i < 4; i++) {
4795                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4796                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4797         }
4798
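        /* Chips other than the 5700/5701/5705 also provide extended MAC
         * address slots; mirror the same address into all of them.
         */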
4799         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4800             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4801             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4802                 for (i = 0; i < 12; i++) {
4803                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4804                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4805                 }
4806         }
4807
4808         addr_high = (tp->dev->dev_addr[0] +
4809                      tp->dev->dev_addr[1] +
4810                      tp->dev->dev_addr[2] +
4811                      tp->dev->dev_addr[3] +
4812                      tp->dev->dev_addr[4] +
4813                      tp->dev->dev_addr[5]) &
4814                 TX_BACKOFF_SEED_MASK;
4815         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4816 }
4817
4818 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4819 {
4820         struct tg3 *tp = netdev_priv(dev);
4821         struct sockaddr *addr = p;
4822
4823         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4824
4825         spin_lock_irq(&tp->lock);
4826         __tg3_set_mac_addr(tp);
4827         spin_unlock_irq(&tp->lock);
4828
4829         return 0;
4830 }
4831
4832 /* tp->lock is held. */
4833 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4834                            dma_addr_t mapping, u32 maxlen_flags,
4835                            u32 nic_addr)
4836 {
4837         tg3_write_mem(tp,
4838                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4839                       ((u64) mapping >> 32));
4840         tg3_write_mem(tp,
4841                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4842                       ((u64) mapping & 0xffffffff));
4843         tg3_write_mem(tp,
4844                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4845                        maxlen_flags);
4846
4847         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4848                 tg3_write_mem(tp,
4849                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4850                               nic_addr);
4851 }
4852
4853 static void __tg3_set_rx_mode(struct net_device *);
4854
4855 /* tp->lock is held. */
4856 static int tg3_reset_hw(struct tg3 *tp)
4857 {
4858         u32 val, rdmac_mode;
4859         int i, err, limit;
4860
4861         tg3_disable_ints(tp);
4862
4863         tg3_stop_fw(tp);
4864
4865         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4866
4867         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4868                 err = tg3_abort_hw(tp);
4869                 if (err)
4870                         return err;
4871         }
4872
4873         err = tg3_chip_reset(tp);
4874         if (err)
4875                 return err;
4876
4877         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4878
4879         /* This works around an issue with Athlon chipsets on
4880          * B3 tigon3 silicon.  This bit has no effect on any
4881          * other revision.  But do not set this on PCI Express
4882          * chips.
4883          */
4884         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4885                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4886         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4887
4888         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4889             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4890                 val = tr32(TG3PCI_PCISTATE);
4891                 val |= PCISTATE_RETRY_SAME_DMA;
4892                 tw32(TG3PCI_PCISTATE, val);
4893         }
4894
4895         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4896                 /* Enable some hw fixes.  */
4897                 val = tr32(TG3PCI_MSI_DATA);
4898                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4899                 tw32(TG3PCI_MSI_DATA, val);
4900         }
4901
4902         /* Descriptor ring init may make accesses to the
4903          * NIC SRAM area to setup the TX descriptors, so we
4904          * can only do this after the hardware has been
4905          * successfully reset.
4906          */
4907         tg3_init_rings(tp);
4908
4909         /* This value is determined during the probe time DMA
4910          * engine test, tg3_test_dma.
4911          */
4912         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4913
4914         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4915                           GRC_MODE_4X_NIC_SEND_RINGS |
4916                           GRC_MODE_NO_TX_PHDR_CSUM |
4917                           GRC_MODE_NO_RX_PHDR_CSUM);
4918         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4919         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4920                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4921         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4922                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4923
4924         tw32(GRC_MODE,
4925              tp->grc_mode |
4926              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4927
4928         /* Set up the timer prescaler register.  The core clock is always 66MHz. */
4929         val = tr32(GRC_MISC_CFG);
4930         val &= ~0xff;
4931         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4932         tw32(GRC_MISC_CFG, val);
4933
4934         /* Initialize MBUF/DESC pool. */
4935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4936                 /* Do nothing.  */
4937         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4938                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4939                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4940                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4941                 else
4942                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4943                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4944                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4945         }
4946 #if TG3_TSO_SUPPORT != 0
4947         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4948                 int fw_len;
4949
4950                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4951                           TG3_TSO5_FW_RODATA_LEN +
4952                           TG3_TSO5_FW_DATA_LEN +
4953                           TG3_TSO5_FW_SBSS_LEN +
4954                           TG3_TSO5_FW_BSS_LEN);
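                /* Round the firmware footprint up to a 128-byte boundary;
                 * the mbuf pool is carved out of NIC SRAM just above it.
                 */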
4955                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4956                 tw32(BUFMGR_MB_POOL_ADDR,
4957                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4958                 tw32(BUFMGR_MB_POOL_SIZE,
4959                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4960         }
4961 #endif
4962
4963         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4964                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4965                      tp->bufmgr_config.mbuf_read_dma_low_water);
4966                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4967                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4968                 tw32(BUFMGR_MB_HIGH_WATER,
4969                      tp->bufmgr_config.mbuf_high_water);
4970         } else {
4971                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4972                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4973                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4974                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4975                 tw32(BUFMGR_MB_HIGH_WATER,
4976                      tp->bufmgr_config.mbuf_high_water_jumbo);
4977         }
4978         tw32(BUFMGR_DMA_LOW_WATER,
4979              tp->bufmgr_config.dma_low_water);
4980         tw32(BUFMGR_DMA_HIGH_WATER,
4981              tp->bufmgr_config.dma_high_water);
4982
4983         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4984         for (i = 0; i < 2000; i++) {
4985                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4986                         break;
4987                 udelay(10);
4988         }
4989         if (i >= 2000) {
4990                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4991                        tp->dev->name);
4992                 return -ENODEV;
4993         }
4994
4995         /* Setup replenish threshold. */
4996         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4997
4998         /* Initialize TG3_BDINFO's at:
4999          *  RCVDBDI_STD_BD:     standard eth size rx ring
5000          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5001          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5002          *
5003          * like so:
5004          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5005          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5006          *                              ring attribute flags
5007          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5008          *
5009          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5010          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5011          *
5012          * The size of each ring is fixed in the firmware, but the location is
5013          * configurable.
5014          */
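             /* Each TG3_BDINFO is a small per-ring record in NIC SRAM.  The
              * two HOST_ADDR writes below supply the ring's host DMA address
              * as high/low 32-bit halves and NIC_ADDR gives the ring's
              * location in NIC SRAM; MAXLEN_FLAGS is programmed separately
              * below because the maximum RX buffer size differs between
              * 5705/5750 and the older chips.
              */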
5015         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5016              ((u64) tp->rx_std_mapping >> 32));
5017         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5018              ((u64) tp->rx_std_mapping & 0xffffffff));
5019         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5020              NIC_SRAM_RX_BUFFER_DESC);
5021
5022         /* Don't even try to program the JUMBO/MINI buffer descriptor
5023          * configs on 5705/5750.
5024          */
5025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5026             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5027                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5028                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5029         } else {
5030                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5031                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5032
5033                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5034                      BDINFO_FLAGS_DISABLED);
5035
5036                 /* Setup replenish threshold. */
5037                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5038
5039                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5040                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5041                              ((u64) tp->rx_jumbo_mapping >> 32));
5042                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5043                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5044                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5045                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5046                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5047                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5048                 } else {
5049                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5050                              BDINFO_FLAGS_DISABLED);
5051                 }
5052
5053         }
5054
5055         /* There is only one send ring on 5705/5750, no need to explicitly
5056          * disable the others.
5057          */
5058         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5059             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5060                 /* Clear out send RCB ring in SRAM. */
5061                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5062                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5063                                       BDINFO_FLAGS_DISABLED);
5064         }
5065
5066         tp->tx_prod = 0;
5067         tp->tx_cons = 0;
5068         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5069         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5070
5071         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5072                        tp->tx_desc_mapping,
5073                        (TG3_TX_RING_SIZE <<
5074                         BDINFO_FLAGS_MAXLEN_SHIFT),
5075                        NIC_SRAM_TX_BUFFER_DESC);
5076
5077         /* There is only one receive return ring on 5705/5750, no need
5078          * to explicitly disable the others.
5079          */
5080         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5081             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5082                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5083                      i += TG3_BDINFO_SIZE) {
5084                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5085                                       BDINFO_FLAGS_DISABLED);
5086                 }
5087         }
5088
5089         tp->rx_rcb_ptr = 0;
5090         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5091
5092         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5093                        tp->rx_rcb_mapping,
5094                        (TG3_RX_RCB_RING_SIZE(tp) <<
5095                         BDINFO_FLAGS_MAXLEN_SHIFT),
5096                        0);
5097
5098         tp->rx_std_ptr = tp->rx_pending;
5099         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5100                      tp->rx_std_ptr);
5101
5102         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5103                                                 tp->rx_jumbo_pending : 0;
5104         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5105                      tp->rx_jumbo_ptr);
5106
5107         /* Initialize MAC address and backoff seed. */
5108         __tg3_set_mac_addr(tp);
5109
5110         /* MTU + ethernet header + FCS + optional VLAN tag */
5111         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5112
5113         /* The slot time is changed by tg3_setup_phy if we
5114          * run at gigabit with half duplex.
5115          */
5116         tw32(MAC_TX_LENGTHS,
5117              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5118              (6 << TX_LENGTHS_IPG_SHIFT) |
5119              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5120
5121         /* Receive rules. */
5122         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5123         tw32(RCVLPC_CONFIG, 0x0181);
5124
5125         /* Calculate RDMAC_MODE setting early, we need it to determine
5126          * the RCVLPC_STATE_ENABLE mask.
5127          */
5128         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5129                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5130                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5131                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5132                       RDMAC_MODE_LNGREAD_ENAB);
5133         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5134                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5135         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5136              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5137             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5138                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5139                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5140                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5141                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5142                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5143                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5144                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5145                 }
5146         }
5147
5148 #if TG3_TSO_SUPPORT != 0
5149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5150                 rdmac_mode |= (1 << 27);
5151 #endif
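             /* Note: the (1 << 27) bit set above for 5750 has no symbolic name
              * in this driver; it appears to be a 5750-specific read-DMA
              * setting tied to TSO (an assumption -- it is only set when TSO
              * support is compiled in).
              */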
5152
5153         /* Receive/send statistics. */
5154         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5155             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5156                 val = tr32(RCVLPC_STATS_ENABLE);
5157                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5158                 tw32(RCVLPC_STATS_ENABLE, val);
5159         } else {
5160                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5161         }
5162         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5163         tw32(SNDDATAI_STATSENAB, 0xffffff);
5164         tw32(SNDDATAI_STATSCTRL,
5165              (SNDDATAI_SCTRL_ENABLE |
5166               SNDDATAI_SCTRL_FASTUPD));
5167
5168         /* Setup host coalescing engine. */
5169         tw32(HOSTCC_MODE, 0);
5170         for (i = 0; i < 2000; i++) {
5171                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5172                         break;
5173                 udelay(10);
5174         }
5175
5176         tw32(HOSTCC_RXCOL_TICKS, 0);
5177         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5178         tw32(HOSTCC_RXMAX_FRAMES, 1);
5179         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5180         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5181             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5182                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5183                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5184         }
5185         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5186         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5187
5188         /* set status block DMA address */
5189         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5190              ((u64) tp->status_mapping >> 32));
5191         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5192              ((u64) tp->status_mapping & 0xffffffff));
5193
5194         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5195             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5196                 /* Status/statistics block address.  See tg3_timer,
5197                  * the tg3_periodic_fetch_stats call there, and
5198                  * tg3_get_stats to see how this works for 5705/5750 chips.
5199                  */
5200                 tw32(HOSTCC_STAT_COAL_TICKS,
5201                      DEFAULT_STAT_COAL_TICKS);
5202                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5203                      ((u64) tp->stats_mapping >> 32));
5204                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5205                      ((u64) tp->stats_mapping & 0xffffffff));
5206                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5207                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5208         }
5209
5210         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5211
5212         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5213         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5214         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5215             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5216                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5217
5218         /* Clear the statistics/status blocks in NIC SRAM, and the status block in host RAM. */
5219         for (i = NIC_SRAM_STATS_BLK;
5220              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5221              i += sizeof(u32)) {
5222                 tg3_write_mem(tp, i, 0);
5223                 udelay(40);
5224         }
5225         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5226
5227         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5228                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5229         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5230         udelay(40);
5231
5232         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5234                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5235                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5236         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5237         udelay(100);
5238
5239         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5240         tr32(MAILBOX_INTERRUPT_0);
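             /* Zero the interrupt mailbox, then read it back so the posted
              * write is flushed before the DMA engines are enabled below.
              */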
5241
5242         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5243             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5244                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5245                 udelay(40);
5246         }
5247
5248         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5249                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5250                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5251                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5252                WDMAC_MODE_LNGREAD_ENAB);
5253
5254         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5255              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5256             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5257                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5258                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5259                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5260                         /* nothing */
5261                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5262                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5263                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5264                         val |= WDMAC_MODE_RX_ACCEL;
5265                 }
5266         }
5267
5268         tw32_f(WDMAC_MODE, val);
5269         udelay(40);
5270
5271         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5272                 val = tr32(TG3PCI_X_CAPS);
5273                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5274                         val &= ~PCIX_CAPS_BURST_MASK;
5275                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5276                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5277                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5278                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5279                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5280                                 val |= (tp->split_mode_max_reqs <<
5281                                         PCIX_CAPS_SPLIT_SHIFT);
5282                 }
5283                 tw32(TG3PCI_X_CAPS, val);
5284         }
5285
5286         tw32_f(RDMAC_MODE, rdmac_mode);
5287         udelay(40);
5288
5289         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5290         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5291             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5292                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5293         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5294         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5295         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5296         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5297         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5298 #if TG3_TSO_SUPPORT != 0
5299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5300                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5301 #endif
5302         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5303         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5304
5305         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5306                 err = tg3_load_5701_a0_firmware_fix(tp);
5307                 if (err)
5308                         return err;
5309         }
5310
5311 #if TG3_TSO_SUPPORT != 0
5312         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5313                 err = tg3_load_tso_firmware(tp);
5314                 if (err)
5315                         return err;
5316         }
5317 #endif
5318
5319         tp->tx_mode = TX_MODE_ENABLE;
5320         tw32_f(MAC_TX_MODE, tp->tx_mode);
5321         udelay(100);
5322
5323         tp->rx_mode = RX_MODE_ENABLE;
5324         tw32_f(MAC_RX_MODE, tp->rx_mode);
5325         udelay(10);
5326
5327         if (tp->link_config.phy_is_low_power) {
5328                 tp->link_config.phy_is_low_power = 0;
5329                 tp->link_config.speed = tp->link_config.orig_speed;
5330                 tp->link_config.duplex = tp->link_config.orig_duplex;
5331                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5332         }
5333
5334         tp->mi_mode = MAC_MI_MODE_BASE;
5335         tw32_f(MAC_MI_MODE, tp->mi_mode);
5336         udelay(80);
5337
5338         tw32(MAC_LED_CTRL, tp->led_ctrl);
5339
5340         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5341         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5342                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5343                 udelay(10);
5344         }
5345         tw32_f(MAC_RX_MODE, tp->rx_mode);
5346         udelay(10);
5347
5348         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5349                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5350                         /* Set drive transmission level to 1.2V  */
5351                         val = tr32(MAC_SERDES_CFG);
5352                         val &= 0xfffff000;
5353                         val |= 0x880;
5354                         tw32(MAC_SERDES_CFG, val);
5355                 }
5356                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5357                         tw32(MAC_SERDES_CFG, 0x616000);
5358         }
5359
5360         /* Prevent chip from dropping frames when flow control
5361          * is enabled.
5362          */
5363         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5364
5365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5366             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5367                 /* Use hardware link auto-negotiation */
5368                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5369         }
5370
5371         err = tg3_setup_phy(tp, 1);
5372         if (err)
5373                 return err;
5374
5375         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5376                 u32 tmp;
5377
5378                 /* Clear CRC stats. */
5379                 tg3_readphy(tp, 0x1e, &tmp);
5380                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5381                 tg3_readphy(tp, 0x14, &tmp);
5382         }
5383
5384         __tg3_set_rx_mode(tp->dev);
5385
5386         /* Initialize receive rules. */
5387         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5388         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5389         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5390         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5391
5392         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5393             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5394                 limit = 8;
5395         else
5396                 limit = 16;
5397         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5398                 limit -= 4;
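             /* The chip supports a fixed number of receive rules (8 on
              * 5705/5750, 16 otherwise); the top four are presumably reserved
              * for the ASF firmware when ASF is enabled.  Rules 0 and 1 were
              * programmed above, rules 2 and 3 are deliberately left alone
              * (see the commented-out writes), and every case below falls
              * through on purpose, clearing rule/value pairs from index
              * (limit - 1) down to 4.
              */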
5399         switch (limit) {
5400         case 16:
5401                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5402         case 15:
5403                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5404         case 14:
5405                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5406         case 13:
5407                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5408         case 12:
5409                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5410         case 11:
5411                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5412         case 10:
5413                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5414         case 9:
5415                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5416         case 8:
5417                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5418         case 7:
5419                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5420         case 6:
5421                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5422         case 5:
5423                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5424         case 4:
5425                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5426         case 3:
5427                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5428         case 2:
5429         case 1:
5430
5431         default:
5432                 break;
5433         }
5434
5435         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5436
5437         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5438                 tg3_enable_ints(tp);
5439
5440         return 0;
5441 }
5442
5443 /* Called at device open time to get the chip ready for
5444  * packet processing.  Invoked with tp->lock held.
5445  */
5446 static int tg3_init_hw(struct tg3 *tp)
5447 {
5448         int err;
5449
5450         /* Force the chip into D0. */
5451         err = tg3_set_power_state(tp, 0);
5452         if (err)
5453                 goto out;
5454
5455         tg3_switch_clocks(tp);
5456
5457         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5458
5459         err = tg3_reset_hw(tp);
5460
5461 out:
5462         return err;
5463 }
5464
5465 #define TG3_STAT_ADD32(PSTAT, REG) \
5466 do {    u32 __val = tr32(REG); \
5467         (PSTAT)->low += __val; \
5468         if ((PSTAT)->low < __val) \
5469                 (PSTAT)->high += 1; \
5470 } while (0)
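     /* TG3_STAT_ADD32() folds a 32-bit MAC statistics register into a 64-bit
      * software counter: if the low word wrapped during the add (the new low
      * is smaller than the value just added), carry one into the high word.
      * This is how 5705/5750-class chips accumulate statistics, since they do
      * not DMA a statistics block to host memory (see tg3_reset_hw above).
      */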
5471
5472 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5473 {
5474         struct tg3_hw_stats *sp = tp->hw_stats;
5475
5476         if (!netif_carrier_ok(tp->dev))
5477                 return;
5478
5479         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5480         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5481         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5482         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5483         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5484         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5485         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5486         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5487         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5488         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5489         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5490         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5491         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5492
5493         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5494         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5495         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5496         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5497         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5498         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5499         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5500         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5501         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5502         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5503         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5504         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5505         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5506         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5507 }
5508
5509 static void tg3_timer(unsigned long __opaque)
5510 {
5511         struct tg3 *tp = (struct tg3 *) __opaque;
5512         unsigned long flags;
5513
5514         spin_lock_irqsave(&tp->lock, flags);
5515         spin_lock(&tp->tx_lock);
5516
5517         /* All of this garbage is because, when using non-tagged
5518          * IRQ status, the mailbox/status_block protocol the chip
5519          * uses with the CPU is race-prone.
5520          */
5521         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5522                 tw32(GRC_LOCAL_CTRL,
5523                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5524         } else {
5525                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5526                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5527         }
5528
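             /* If the write-DMA engine has lost its enable bit the chip has
              * effectively stalled, so drop the locks, note that the timer
              * needs restarting, and let reset_task recover the device; the
              * timer is deliberately not re-armed here.
              */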
5529         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5530                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5531                 spin_unlock(&tp->tx_lock);
5532                 spin_unlock_irqrestore(&tp->lock, flags);
5533                 schedule_work(&tp->reset_task);
5534                 return;
5535         }
5536
5537         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5539                 tg3_periodic_fetch_stats(tp);
5540
5541         /* This part only runs once per second. */
5542         if (!--tp->timer_counter) {
5543                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5544                         u32 mac_stat;
5545                         int phy_event;
5546
5547                         mac_stat = tr32(MAC_STATUS);
5548
5549                         phy_event = 0;
5550                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5551                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5552                                         phy_event = 1;
5553                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5554                                 phy_event = 1;
5555
5556                         if (phy_event)
5557                                 tg3_setup_phy(tp, 0);
5558                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5559                         u32 mac_stat = tr32(MAC_STATUS);
5560                         int need_setup = 0;
5561
5562                         if (netif_carrier_ok(tp->dev) &&
5563                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5564                                 need_setup = 1;
5565                         }
5566                         if (! netif_carrier_ok(tp->dev) &&
5567                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5568                                          MAC_STATUS_SIGNAL_DET))) {
5569                                 need_setup = 1;
5570                         }
5571                         if (need_setup) {
5572                                 tw32_f(MAC_MODE,
5573                                      (tp->mac_mode &
5574                                       ~MAC_MODE_PORT_MODE_MASK));
5575                                 udelay(40);
5576                                 tw32_f(MAC_MODE, tp->mac_mode);
5577                                 udelay(40);
5578                                 tg3_setup_phy(tp, 0);
5579                         }
5580                 }
5581
5582                 tp->timer_counter = tp->timer_multiplier;
5583         }
5584
5585         /* Heartbeat is only sent once every 120 seconds.  */
5586         if (!--tp->asf_counter) {
5587                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5588                         u32 val;
5589
5590                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5591                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5592                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5593                         val = tr32(GRC_RX_CPU_EVENT);
5594                         val |= (1 << 14);
5595                         tw32(GRC_RX_CPU_EVENT, val);
5596                 }
5597                 tp->asf_counter = tp->asf_multiplier;
5598         }
5599
5600         spin_unlock(&tp->tx_lock);
5601         spin_unlock_irqrestore(&tp->lock, flags);
5602
5603         tp->timer.expires = jiffies + tp->timer_offset;
5604         add_timer(&tp->timer);
5605 }
5606
5607 static int tg3_open(struct net_device *dev)
5608 {
5609         struct tg3 *tp = netdev_priv(dev);
5610         int err;
5611
5612         spin_lock_irq(&tp->lock);
5613         spin_lock(&tp->tx_lock);
5614
5615         tg3_disable_ints(tp);
5616         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5617
5618         spin_unlock(&tp->tx_lock);
5619         spin_unlock_irq(&tp->lock);
5620
5621         /* The placement of this call is tied
5622          * to the setup and use of Host TX descriptors.
5623          */
5624         err = tg3_alloc_consistent(tp);
5625         if (err)
5626                 return err;
5627
5628         err = request_irq(dev->irq, tg3_interrupt,
5629                           SA_SHIRQ, dev->name, dev);
5630
5631         if (err) {
5632                 tg3_free_consistent(tp);
5633                 return err;
5634         }
5635
5636         spin_lock_irq(&tp->lock);
5637         spin_lock(&tp->tx_lock);
5638
5639         err = tg3_init_hw(tp);
5640         if (err) {
5641                 tg3_halt(tp);
5642                 tg3_free_rings(tp);
5643         } else {
5644                 tp->timer_offset = HZ / 10;
5645                 tp->timer_counter = tp->timer_multiplier = 10;
5646                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
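                     /* The timer fires every HZ/10 jiffies.  timer_counter
                      * counts ten of those ticks so the link-polling work in
                      * tg3_timer() runs roughly once a second, and asf_counter
                      * (10 * 120 ticks) paces the ASF heartbeat to about once
                      * every 120 seconds.
                      */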
5647
5648                 init_timer(&tp->timer);
5649                 tp->timer.expires = jiffies + tp->timer_offset;
5650                 tp->timer.data = (unsigned long) tp;
5651                 tp->timer.function = tg3_timer;
5652                 add_timer(&tp->timer);
5653
5654                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5655         }
5656
5657         spin_unlock(&tp->tx_lock);
5658         spin_unlock_irq(&tp->lock);
5659
5660         if (err) {
5661                 free_irq(dev->irq, dev);
5662                 tg3_free_consistent(tp);
5663                 return err;
5664         }
5665
5666         spin_lock_irq(&tp->lock);
5667         spin_lock(&tp->tx_lock);
5668
5669         tg3_enable_ints(tp);
5670
5671         spin_unlock(&tp->tx_lock);
5672         spin_unlock_irq(&tp->lock);
5673
5674         netif_start_queue(dev);
5675
5676         return 0;
5677 }
5678
5679 #if 0
5680 /*static*/ void tg3_dump_state(struct tg3 *tp)
5681 {
5682         u32 val32, val32_2, val32_3, val32_4, val32_5;
5683         u16 val16;
5684         int i;
5685
5686         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5687         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5688         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5689                val16, val32);
5690
5691         /* MAC block */
5692         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5693                tr32(MAC_MODE), tr32(MAC_STATUS));
5694         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5695                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5696         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5697                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5698         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5699                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5700
5701         /* Send data initiator control block */
5702         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5703                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5704         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5705                tr32(SNDDATAI_STATSCTRL));
5706
5707         /* Send data completion control block */
5708         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5709
5710         /* Send BD ring selector block */
5711         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5712                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5713
5714         /* Send BD initiator control block */
5715         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5716                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5717
5718         /* Send BD completion control block */
5719         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5720
5721         /* Receive list placement control block */
5722         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5723                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5724         printk("       RCVLPC_STATSCTRL[%08x]\n",
5725                tr32(RCVLPC_STATSCTRL));
5726
5727         /* Receive data and receive BD initiator control block */
5728         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5729                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5730
5731         /* Receive data completion control block */
5732         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5733                tr32(RCVDCC_MODE));
5734
5735         /* Receive BD initiator control block */
5736         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5737                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5738
5739         /* Receive BD completion control block */
5740         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5741                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5742
5743         /* Receive list selector control block */
5744         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5745                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5746
5747         /* Mbuf cluster free block */
5748         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5749                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5750
5751         /* Host coalescing control block */
5752         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5753                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5754         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5755                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5756                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5757         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5758                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5759                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5760         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5761                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5762         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5763                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5764
5765         /* Memory arbiter control block */
5766         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5767                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5768
5769         /* Buffer manager control block */
5770         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5771                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5772         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5773                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5774         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5775                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5776                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5777                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5778
5779         /* Read DMA control block */
5780         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5781                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5782
5783         /* Write DMA control block */
5784         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5785                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5786
5787         /* DMA completion block */
5788         printk("DEBUG: DMAC_MODE[%08x]\n",
5789                tr32(DMAC_MODE));
5790
5791         /* GRC block */
5792         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5793                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5794         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5795                tr32(GRC_LOCAL_CTRL));
5796
5797         /* TG3_BDINFOs */
5798         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5799                tr32(RCVDBDI_JUMBO_BD + 0x0),
5800                tr32(RCVDBDI_JUMBO_BD + 0x4),
5801                tr32(RCVDBDI_JUMBO_BD + 0x8),
5802                tr32(RCVDBDI_JUMBO_BD + 0xc));
5803         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5804                tr32(RCVDBDI_STD_BD + 0x0),
5805                tr32(RCVDBDI_STD_BD + 0x4),
5806                tr32(RCVDBDI_STD_BD + 0x8),
5807                tr32(RCVDBDI_STD_BD + 0xc));
5808         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5809                tr32(RCVDBDI_MINI_BD + 0x0),
5810                tr32(RCVDBDI_MINI_BD + 0x4),
5811                tr32(RCVDBDI_MINI_BD + 0x8),
5812                tr32(RCVDBDI_MINI_BD + 0xc));
5813
5814         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5815         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5816         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5817         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5818         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5819                val32, val32_2, val32_3, val32_4);
5820
5821         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5822         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5823         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5824         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5825         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5826                val32, val32_2, val32_3, val32_4);
5827
5828         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5829         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5830         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5831         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5832         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5833         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5834                val32, val32_2, val32_3, val32_4, val32_5);
5835
5836         /* SW status block */
5837         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5838                tp->hw_status->status,
5839                tp->hw_status->status_tag,
5840                tp->hw_status->rx_jumbo_consumer,
5841                tp->hw_status->rx_consumer,
5842                tp->hw_status->rx_mini_consumer,
5843                tp->hw_status->idx[0].rx_producer,
5844                tp->hw_status->idx[0].tx_consumer);
5845
5846         /* SW statistics block */
5847         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5848                ((u32 *)tp->hw_stats)[0],
5849                ((u32 *)tp->hw_stats)[1],
5850                ((u32 *)tp->hw_stats)[2],
5851                ((u32 *)tp->hw_stats)[3]);
5852
5853         /* Mailboxes */
5854         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5855                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5856                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5857                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5858                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5859
5860         /* NIC side send descriptors. */
5861         for (i = 0; i < 6; i++) {
5862                 unsigned long txd;
5863
5864                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5865                         + (i * sizeof(struct tg3_tx_buffer_desc));
5866                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5867                        i,
5868                        readl(txd + 0x0), readl(txd + 0x4),
5869                        readl(txd + 0x8), readl(txd + 0xc));
5870         }
5871
5872         /* NIC side RX descriptors. */
5873         for (i = 0; i < 6; i++) {
5874                 unsigned long rxd;
5875
5876                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5877                         + (i * sizeof(struct tg3_rx_buffer_desc));
5878                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5879                        i,
5880                        readl(rxd + 0x0), readl(rxd + 0x4),
5881                        readl(rxd + 0x8), readl(rxd + 0xc));
5882                 rxd += (4 * sizeof(u32));
5883                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5884                        i,
5885                        readl(rxd + 0x0), readl(rxd + 0x4),
5886                        readl(rxd + 0x8), readl(rxd + 0xc));
5887         }
5888
5889         for (i = 0; i < 6; i++) {
5890                 unsigned long rxd;
5891
5892                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5893                         + (i * sizeof(struct tg3_rx_buffer_desc));
5894                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5895                        i,
5896                        readl(rxd + 0x0), readl(rxd + 0x4),
5897                        readl(rxd + 0x8), readl(rxd + 0xc));
5898                 rxd += (4 * sizeof(u32));
5899                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5900                        i,
5901                        readl(rxd + 0x0), readl(rxd + 0x4),
5902                        readl(rxd + 0x8), readl(rxd + 0xc));
5903         }
5904 }
5905 #endif
5906
5907 static struct net_device_stats *tg3_get_stats(struct net_device *);
5908 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5909
5910 static int tg3_close(struct net_device *dev)
5911 {
5912         struct tg3 *tp = netdev_priv(dev);
5913
5914         netif_stop_queue(dev);
5915
5916         del_timer_sync(&tp->timer);
5917
5918         spin_lock_irq(&tp->lock);
5919         spin_lock(&tp->tx_lock);
5920 #if 0
5921         tg3_dump_state(tp);
5922 #endif
5923
5924         tg3_disable_ints(tp);
5925
5926         tg3_halt(tp);
5927         tg3_free_rings(tp);
5928         tp->tg3_flags &=
5929                 ~(TG3_FLAG_INIT_COMPLETE |
5930                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5931         netif_carrier_off(tp->dev);
5932
5933         spin_unlock(&tp->tx_lock);
5934         spin_unlock_irq(&tp->lock);
5935
5936         free_irq(dev->irq, dev);
5937
5938         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5939                sizeof(tp->net_stats_prev));
5940         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5941                sizeof(tp->estats_prev));
5942
5943         tg3_free_consistent(tp);
5944
5945         return 0;
5946 }
5947
5948 static inline unsigned long get_stat64(tg3_stat64_t *val)
5949 {
5950         unsigned long ret;
5951
5952 #if (BITS_PER_LONG == 32)
5953         ret = val->low;
5954 #else
5955         ret = ((u64)val->high << 32) | ((u64)val->low);
5956 #endif
5957         return ret;
5958 }
5959
5960 static unsigned long calc_crc_errors(struct tg3 *tp)
5961 {
5962         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5963
5964         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
5965             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5966              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5967                 unsigned long flags;
5968                 u32 val;
5969
5970                 spin_lock_irqsave(&tp->lock, flags);
5971                 tg3_readphy(tp, 0x1e, &val);
5972                 tg3_writephy(tp, 0x1e, val | 0x8000);
5973                 tg3_readphy(tp, 0x14, &val);
5974                 spin_unlock_irqrestore(&tp->lock, flags);
5975
5976                 tp->phy_crc_errors += val;
5977
5978                 return tp->phy_crc_errors;
5979         }
5980
5981         return get_stat64(&hw_stats->rx_fcs_errors);
5982 }
5983
5984 #define ESTAT_ADD(member) \
5985         estats->member =        old_estats->member + \
5986                                 get_stat64(&hw_stats->member)
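     /* ESTAT_ADD() adds the live hardware counter to the snapshot taken in
      * tg3_close() (estats_prev), so the ethtool statistics remain cumulative
      * across an interface down/up cycle.
      */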
5987
5988 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5989 {
5990         struct tg3_ethtool_stats *estats = &tp->estats;
5991         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5992         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5993
5994         if (!hw_stats)
5995                 return old_estats;
5996
5997         ESTAT_ADD(rx_octets);
5998         ESTAT_ADD(rx_fragments);
5999         ESTAT_ADD(rx_ucast_packets);
6000         ESTAT_ADD(rx_mcast_packets);
6001         ESTAT_ADD(rx_bcast_packets);
6002         ESTAT_ADD(rx_fcs_errors);
6003         ESTAT_ADD(rx_align_errors);
6004         ESTAT_ADD(rx_xon_pause_rcvd);
6005         ESTAT_ADD(rx_xoff_pause_rcvd);
6006         ESTAT_ADD(rx_mac_ctrl_rcvd);
6007         ESTAT_ADD(rx_xoff_entered);
6008         ESTAT_ADD(rx_frame_too_long_errors);
6009         ESTAT_ADD(rx_jabbers);
6010         ESTAT_ADD(rx_undersize_packets);
6011         ESTAT_ADD(rx_in_length_errors);
6012         ESTAT_ADD(rx_out_length_errors);
6013         ESTAT_ADD(rx_64_or_less_octet_packets);
6014         ESTAT_ADD(rx_65_to_127_octet_packets);
6015         ESTAT_ADD(rx_128_to_255_octet_packets);
6016         ESTAT_ADD(rx_256_to_511_octet_packets);
6017         ESTAT_ADD(rx_512_to_1023_octet_packets);
6018         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6019         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6020         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6021         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6022         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6023
6024         ESTAT_ADD(tx_octets);
6025         ESTAT_ADD(tx_collisions);
6026         ESTAT_ADD(tx_xon_sent);
6027         ESTAT_ADD(tx_xoff_sent);
6028         ESTAT_ADD(tx_flow_control);
6029         ESTAT_ADD(tx_mac_errors);
6030         ESTAT_ADD(tx_single_collisions);
6031         ESTAT_ADD(tx_mult_collisions);
6032         ESTAT_ADD(tx_deferred);
6033         ESTAT_ADD(tx_excessive_collisions);
6034         ESTAT_ADD(tx_late_collisions);
6035         ESTAT_ADD(tx_collide_2times);
6036         ESTAT_ADD(tx_collide_3times);
6037         ESTAT_ADD(tx_collide_4times);
6038         ESTAT_ADD(tx_collide_5times);
6039         ESTAT_ADD(tx_collide_6times);
6040         ESTAT_ADD(tx_collide_7times);
6041         ESTAT_ADD(tx_collide_8times);
6042         ESTAT_ADD(tx_collide_9times);
6043         ESTAT_ADD(tx_collide_10times);
6044         ESTAT_ADD(tx_collide_11times);
6045         ESTAT_ADD(tx_collide_12times);
6046         ESTAT_ADD(tx_collide_13times);
6047         ESTAT_ADD(tx_collide_14times);
6048         ESTAT_ADD(tx_collide_15times);
6049         ESTAT_ADD(tx_ucast_packets);
6050         ESTAT_ADD(tx_mcast_packets);
6051         ESTAT_ADD(tx_bcast_packets);
6052         ESTAT_ADD(tx_carrier_sense_errors);
6053         ESTAT_ADD(tx_discards);
6054         ESTAT_ADD(tx_errors);
6055
6056         ESTAT_ADD(dma_writeq_full);
6057         ESTAT_ADD(dma_write_prioq_full);
6058         ESTAT_ADD(rxbds_empty);
6059         ESTAT_ADD(rx_discards);
6060         ESTAT_ADD(rx_errors);
6061         ESTAT_ADD(rx_threshold_hit);
6062
6063         ESTAT_ADD(dma_readq_full);
6064         ESTAT_ADD(dma_read_prioq_full);
6065         ESTAT_ADD(tx_comp_queue_full);
6066
6067         ESTAT_ADD(ring_set_send_prod_index);
6068         ESTAT_ADD(ring_status_update);
6069         ESTAT_ADD(nic_irqs);
6070         ESTAT_ADD(nic_avoided_irqs);
6071         ESTAT_ADD(nic_tx_threshold_hit);
6072
6073         return estats;
6074 }
6075
6076 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6077 {
6078         struct tg3 *tp = netdev_priv(dev);
6079         struct net_device_stats *stats = &tp->net_stats;
6080         struct net_device_stats *old_stats = &tp->net_stats_prev;
6081         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6082
6083         if (!hw_stats)
6084                 return old_stats;
6085
6086         stats->rx_packets = old_stats->rx_packets +
6087                 get_stat64(&hw_stats->rx_ucast_packets) +
6088                 get_stat64(&hw_stats->rx_mcast_packets) +
6089                 get_stat64(&hw_stats->rx_bcast_packets);
6090
6091         stats->tx_packets = old_stats->tx_packets +
6092                 get_stat64(&hw_stats->tx_ucast_packets) +
6093                 get_stat64(&hw_stats->tx_mcast_packets) +
6094                 get_stat64(&hw_stats->tx_bcast_packets);
6095
6096         stats->rx_bytes = old_stats->rx_bytes +
6097                 get_stat64(&hw_stats->rx_octets);
6098         stats->tx_bytes = old_stats->tx_bytes +
6099                 get_stat64(&hw_stats->tx_octets);
6100
6101         stats->rx_errors = old_stats->rx_errors +
6102                 get_stat64(&hw_stats->rx_errors) +
6103                 get_stat64(&hw_stats->rx_discards);
6104         stats->tx_errors = old_stats->tx_errors +
6105                 get_stat64(&hw_stats->tx_errors) +
6106                 get_stat64(&hw_stats->tx_mac_errors) +
6107                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6108                 get_stat64(&hw_stats->tx_discards);
6109
6110         stats->multicast = old_stats->multicast +
6111                 get_stat64(&hw_stats->rx_mcast_packets);
6112         stats->collisions = old_stats->collisions +
6113                 get_stat64(&hw_stats->tx_collisions);
6114
6115         stats->rx_length_errors = old_stats->rx_length_errors +
6116                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6117                 get_stat64(&hw_stats->rx_undersize_packets);
6118
6119         stats->rx_over_errors = old_stats->rx_over_errors +
6120                 get_stat64(&hw_stats->rxbds_empty);
6121         stats->rx_frame_errors = old_stats->rx_frame_errors +
6122                 get_stat64(&hw_stats->rx_align_errors);
6123         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6124                 get_stat64(&hw_stats->tx_discards);
6125         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6126                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6127
6128         stats->rx_crc_errors = old_stats->rx_crc_errors +
6129                 calc_crc_errors(tp);
6130
6131         return stats;
6132 }
6133
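     /* calc_crc() is a bit-at-a-time, LSB-first CRC-32 using the reflected
      * Ethernet polynomial 0xedb88320.  __tg3_set_rx_mode() uses it to hash
      * multicast addresses into the 128-bit MAC hash filter.
      */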
6134 static inline u32 calc_crc(unsigned char *buf, int len)
6135 {
6136         u32 reg;
6137         u32 tmp;
6138         int j, k;
6139
6140         reg = 0xffffffff;
6141
6142         for (j = 0; j < len; j++) {
6143                 reg ^= buf[j];
6144
6145                 for (k = 0; k < 8; k++) {
6146                         tmp = reg & 0x01;
6147
6148                         reg >>= 1;
6149
6150                         if (tmp) {
6151                                 reg ^= 0xedb88320;
6152                         }
6153                 }
6154         }
6155
6156         return ~reg;
6157 }
6158
6159 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6160 {
6161         /* accept or reject all multicast frames */
6162         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6163         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6164         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6165         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6166 }
6167
6168 static void __tg3_set_rx_mode(struct net_device *dev)
6169 {
6170         struct tg3 *tp = netdev_priv(dev);
6171         u32 rx_mode;
6172
6173         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6174                                   RX_MODE_KEEP_VLAN_TAG);
6175
6176         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6177          * flag clear.
6178          */
6179 #if TG3_VLAN_TAG_USED
6180         if (!tp->vlgrp &&
6181             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6182                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6183 #else
6184         /* By definition, VLAN is always disabled in this
6185          * case.
6186          */
6187         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6188                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6189 #endif
6190
6191         if (dev->flags & IFF_PROMISC) {
6192                 /* Promiscuous mode. */
6193                 rx_mode |= RX_MODE_PROMISC;
6194         } else if (dev->flags & IFF_ALLMULTI) {
6195                 /* Accept all multicast. */
6196                 tg3_set_multi (tp, 1);
6197         } else if (dev->mc_count < 1) {
6198                 /* Reject all multicast. */
6199                 tg3_set_multi (tp, 0);
6200         } else {
6201                 /* Accept one or more multicast(s). */
6202                 struct dev_mc_list *mclist;
6203                 unsigned int i;
6204                 u32 mc_filter[4] = { 0, };
6205                 u32 regidx;
6206                 u32 bit;
6207                 u32 crc;
6208
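                     /* Hash each multicast address: the low 7 bits of the
                      * inverted CRC select one of 128 filter bits -- bits 6:5
                      * pick which MAC_HASH_REG register, bits 4:0 the bit
                      * within it.
                      */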
6209                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6210                      i++, mclist = mclist->next) {
6211
6212                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6213                         bit = ~crc & 0x7f;
6214                         regidx = (bit & 0x60) >> 5;
6215                         bit &= 0x1f;
6216                         mc_filter[regidx] |= (1 << bit);
6217                 }
6218
6219                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6220                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6221                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6222                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6223         }
6224
6225         if (rx_mode != tp->rx_mode) {
6226                 tp->rx_mode = rx_mode;
6227                 tw32_f(MAC_RX_MODE, rx_mode);
6228                 udelay(10);
6229         }
6230 }
6231
6232 static void tg3_set_rx_mode(struct net_device *dev)
6233 {
6234         struct tg3 *tp = netdev_priv(dev);
6235
6236         spin_lock_irq(&tp->lock);
6237         spin_lock(&tp->tx_lock);
6238         __tg3_set_rx_mode(dev);
6239         spin_unlock(&tp->tx_lock);
6240         spin_unlock_irq(&tp->lock);
6241 }
6242
6243 #define TG3_REGDUMP_LEN         (32 * 1024)
6244
6245 static int tg3_get_regs_len(struct net_device *dev)
6246 {
6247         return TG3_REGDUMP_LEN;
6248 }
6249
6250 static void tg3_get_regs(struct net_device *dev,
6251                 struct ethtool_regs *regs, void *_p)
6252 {
6253         u32 *p = _p;
6254         struct tg3 *tp = netdev_priv(dev);
6255         u8 *orig_p = _p;
6256         int i;
6257
6258         regs->version = 0;
6259
6260         memset(p, 0, TG3_REGDUMP_LEN);
6261
6262         spin_lock_irq(&tp->lock);
6263         spin_lock(&tp->tx_lock);
6264
6265 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6266 #define GET_REG32_LOOP(base,len)                \
6267 do {    p = (u32 *)(orig_p + (base));           \
6268         for (i = 0; i < len; i += 4)            \
6269                 __GET_REG32((base) + i);        \
6270 } while (0)
6271 #define GET_REG32_1(reg)                        \
6272 do {    p = (u32 *)(orig_p + (reg));            \
6273         __GET_REG32((reg));                     \
6274 } while (0)
6275
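             /* The dump buffer mirrors the register address space: each block
              * of registers is copied to its own offset (orig_p + base) within
              * the 32KB buffer, and any region not read here stays zero from
              * the memset() above.
              */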
6276         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6277         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6278         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6279         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6280         GET_REG32_1(SNDDATAC_MODE);
6281         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6282         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6283         GET_REG32_1(SNDBDC_MODE);
6284         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6285         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6286         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6287         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6288         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6289         GET_REG32_1(RCVDCC_MODE);
6290         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6291         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6292         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6293         GET_REG32_1(MBFREE_MODE);
6294         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6295         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6296         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6297         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6298         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6299         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6300         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6301         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6302         GET_REG32_LOOP(FTQ_RESET, 0x120);
6303         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6304         GET_REG32_1(DMAC_MODE);
6305         GET_REG32_LOOP(GRC_MODE, 0x4c);
6306         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6307                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6308
6309 #undef __GET_REG32
6310 #undef GET_REG32_LOOP
6311 #undef GET_REG32_1
6312
6313         spin_unlock(&tp->tx_lock);
6314         spin_unlock_irq(&tp->lock);
6315 }
6316
6317 static int tg3_get_eeprom_len(struct net_device *dev)
6318 {
6319         return EEPROM_CHIP_SIZE;
6320 }
6321
6322 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6323                                                  u32 offset, u32 *val);
6324 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6325 {
6326         struct tg3 *tp = netdev_priv(dev);
6327         int ret;
6328         u8  *pd;
6329         u32 i, offset, len, val, b_offset, b_count;
6330
6331         offset = eeprom->offset;
6332         len = eeprom->len;
6333         eeprom->len = 0;
6334
6335         ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6336         if (ret)
6337                 return ret;
6338         eeprom->magic = swab32(eeprom->magic);
6339
6340         if (offset & 3) {
6341                 /* adjustments to start on required 4 byte boundary */
6342                 b_offset = offset & 3;
6343                 b_count = 4 - b_offset;
6344                 if (b_count > len) {
6345                         /* i.e. offset=1 len=2 */
6346                         b_count = len;
6347                 }
6348                 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6349                 if (ret)
6350                         return ret;
6351                 memcpy(data, ((char*)&val) + b_offset, b_count);
6352                 len -= b_count;
6353                 offset += b_count;
6354                 eeprom->len += b_count;
6355         }
6356
6357         /* read bytes up to the last 4-byte boundary */
6358         pd = &data[eeprom->len];
6359         for (i = 0; i < (len - (len & 3)); i += 4) {
6360                 ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
6361                                 (u32*)(pd + i));
6362                 if (ret) {
6363                         eeprom->len += i;
6364                         return ret;
6365                 }
6366         }
6367         eeprom->len += i;
6368
6369         if (len & 3) {
6370                 /* read last bytes not ending on 4 byte boundary */
6371                 pd = &data[eeprom->len];
6372                 b_count = len & 3;
6373                 b_offset = offset + len - b_count;
6374                 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6375                 if (ret)
6376                         return ret;
6377                 memcpy(pd, ((char*)&val), b_count);
6378                 eeprom->len += b_count;
6379         }
6380         return 0;
6381 }
6382
6383 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6384 {
6385         struct tg3 *tp = netdev_priv(dev);
6386   
6387         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6388                                         tp->link_config.phy_is_low_power)
6389                 return -EAGAIN;
6390
6391         cmd->supported = (SUPPORTED_Autoneg);
6392
6393         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6394                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6395                                    SUPPORTED_1000baseT_Full);
6396
6397         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6398                 cmd->supported |= (SUPPORTED_100baseT_Half |
6399                                   SUPPORTED_100baseT_Full |
6400                                   SUPPORTED_10baseT_Half |
6401                                   SUPPORTED_10baseT_Full |
6402                                   SUPPORTED_MII);
6403         else
6404                 cmd->supported |= SUPPORTED_FIBRE;
6405   
6406         cmd->advertising = tp->link_config.advertising;
6407         cmd->speed = tp->link_config.active_speed;
6408         cmd->duplex = tp->link_config.active_duplex;
6409         cmd->port = 0;
6410         cmd->phy_address = PHY_ADDR;
6411         cmd->transceiver = 0;
6412         cmd->autoneg = tp->link_config.autoneg;
6413         cmd->maxtxpkt = 0;
6414         cmd->maxrxpkt = 0;
6415         return 0;
6416 }
6417   
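/* Apply new link settings under both locks and restart the link setup via
 * tg3_setup_phy().  With autoneg enabled the forced speed/duplex values are
 * invalidated; with autoneg disabled the advertisement mask is cleared.
 */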
6418 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6419 {
6420         struct tg3 *tp = netdev_priv(dev);
6421   
6422         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6423             tp->link_config.phy_is_low_power)
6424                 return -EAGAIN;
6425
6426         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6427                 /* These are the only valid advertisement bits allowed.  */
6428                 if (cmd->autoneg == AUTONEG_ENABLE &&
6429                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6430                                           ADVERTISED_1000baseT_Full |
6431                                           ADVERTISED_Autoneg |
6432                                           ADVERTISED_FIBRE)))
6433                         return -EINVAL;
6434         }
6435
6436         spin_lock_irq(&tp->lock);
6437         spin_lock(&tp->tx_lock);
6438
6439         tp->link_config.autoneg = cmd->autoneg;
6440         if (cmd->autoneg == AUTONEG_ENABLE) {
6441                 tp->link_config.advertising = cmd->advertising;
6442                 tp->link_config.speed = SPEED_INVALID;
6443                 tp->link_config.duplex = DUPLEX_INVALID;
6444         } else {
6445                 tp->link_config.advertising = 0;
6446                 tp->link_config.speed = cmd->speed;
6447                 tp->link_config.duplex = cmd->duplex;
6448         }
6449   
6450         tg3_setup_phy(tp, 1);
6451         spin_unlock(&tp->tx_lock);
6452         spin_unlock_irq(&tp->lock);
6453   
6454         return 0;
6455 }
6456   
6457 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6458 {
6459         struct tg3 *tp = netdev_priv(dev);
6460   
6461         strcpy(info->driver, DRV_MODULE_NAME);
6462         strcpy(info->version, DRV_MODULE_VERSION);
6463         strcpy(info->bus_info, pci_name(tp->pdev));
6464 }
6465   
6466 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6467 {
6468         struct tg3 *tp = netdev_priv(dev);
6469   
6470         wol->supported = WAKE_MAGIC;
6471         wol->wolopts = 0;
6472         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6473                 wol->wolopts = WAKE_MAGIC;
6474         memset(&wol->sopass, 0, sizeof(wol->sopass));
6475 }
6476   
6477 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6478 {
6479         struct tg3 *tp = netdev_priv(dev);
6480   
6481         if (wol->wolopts & ~WAKE_MAGIC)
6482                 return -EINVAL;
6483         if ((wol->wolopts & WAKE_MAGIC) &&
6484             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6485             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6486                 return -EINVAL;
6487   
6488         spin_lock_irq(&tp->lock);
6489         if (wol->wolopts & WAKE_MAGIC)
6490                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6491         else
6492                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6493         spin_unlock_irq(&tp->lock);
6494   
6495         return 0;
6496 }
6497   
6498 static u32 tg3_get_msglevel(struct net_device *dev)
6499 {
6500         struct tg3 *tp = netdev_priv(dev);
6501         return tp->msg_enable;
6502 }
6503   
6504 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6505 {
6506         struct tg3 *tp = netdev_priv(dev);
6507         tp->msg_enable = value;
6508 }
6509   
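/* TSO may only be enabled on chips flagged TG3_FLG2_TSO_CAPABLE; on other
 * chips a request to turn it on is rejected with -EINVAL.
 */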
6510 #if TG3_TSO_SUPPORT != 0
6511 static int tg3_set_tso(struct net_device *dev, u32 value)
6512 {
6513         struct tg3 *tp = netdev_priv(dev);
6514
6515         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6516                 if (value)
6517                         return -EINVAL;
6518                 return 0;
6519         }
6520         return ethtool_op_set_tso(dev, value);
6521 }
6522 #endif
6523   
6524 static int tg3_nway_reset(struct net_device *dev)
6525 {
6526         struct tg3 *tp = netdev_priv(dev);
6527         u32 bmcr;
6528         int r;
6529   
6530         spin_lock_irq(&tp->lock);
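        /* BMCR is read twice here, presumably to discard a stale value from
         * the previous MII transaction; only the second read is used.
         */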
6531         tg3_readphy(tp, MII_BMCR, &bmcr);
6532         tg3_readphy(tp, MII_BMCR, &bmcr);
6533         r = -EINVAL;
6534         if (bmcr & BMCR_ANENABLE) {
6535                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6536                 r = 0;
6537         }
6538         spin_unlock_irq(&tp->lock);
6539   
6540         return r;
6541 }
6542   
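/* Report ring limits to ethtool: the largest pending count that may be
 * requested is one less than the corresponding ring size, and only the
 * pending counts (not the ring sizes themselves) are tunable.
 */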
6543 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6544 {
6545         struct tg3 *tp = netdev_priv(dev);
6546   
6547         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6548         ering->rx_mini_max_pending = 0;
6549         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6550
6551         ering->rx_pending = tp->rx_pending;
6552         ering->rx_mini_pending = 0;
6553         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6554         ering->tx_pending = tp->tx_pending;
6555 }
6556   
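/* New pending counts only take effect after a full chip halt and
 * re-initialization, so the interface is quiesced around the update.
 */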
6557 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6558 {
6559         struct tg3 *tp = netdev_priv(dev);
6560   
6561         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6562             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6563             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6564                 return -EINVAL;
6565   
6566         tg3_netif_stop(tp);
6567         spin_lock_irq(&tp->lock);
6568         spin_lock(&tp->tx_lock);
6569   
6570         tp->rx_pending = ering->rx_pending;
6571
6572         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6573             tp->rx_pending > 63)
6574                 tp->rx_pending = 63;
6575         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6576         tp->tx_pending = ering->tx_pending;
6577
6578         tg3_halt(tp);
6579         tg3_init_hw(tp);
6580         tg3_netif_start(tp);
6581         spin_unlock(&tp->tx_lock);
6582         spin_unlock_irq(&tp->lock);
6583   
6584         return 0;
6585 }
6586   
6587 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6588 {
6589         struct tg3 *tp = netdev_priv(dev);
6590   
6591         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6592         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6593         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6594 }
6595   
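/* Flow-control changes likewise require halting and re-initializing the
 * chip with the new pause flags before traffic is restarted.
 */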
6596 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6597 {
6598         struct tg3 *tp = netdev_priv(dev);
6599   
6600         tg3_netif_stop(tp);
6601         spin_lock_irq(&tp->lock);
6602         spin_lock(&tp->tx_lock);
6603         if (epause->autoneg)
6604                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6605         else
6606                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6607         if (epause->rx_pause)
6608                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6609         else
6610                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6611         if (epause->tx_pause)
6612                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6613         else
6614                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6615         tg3_halt(tp);
6616         tg3_init_hw(tp);
6617         tg3_netif_start(tp);
6618         spin_unlock(&tp->tx_lock);
6619         spin_unlock_irq(&tp->lock);
6620   
6621         return 0;
6622 }
6623   
6624 static u32 tg3_get_rx_csum(struct net_device *dev)
6625 {
6626         struct tg3 *tp = netdev_priv(dev);
6627         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6628 }
6629   
6630 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6631 {
6632         struct tg3 *tp = netdev_priv(dev);
6633   
6634         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6635                 if (data != 0)
6636                         return -EINVAL;
6637                 return 0;
6638         }
6639   
6640         spin_lock_irq(&tp->lock);
6641         if (data)
6642                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6643         else
6644                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6645         spin_unlock_irq(&tp->lock);
6646   
6647         return 0;
6648 }
6649   
6650 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6651 {
6652         struct tg3 *tp = netdev_priv(dev);
6653   
6654         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6655                 if (data != 0)
6656                         return -EINVAL;
6657                 return 0;
6658         }
6659   
6660         if (data)
6661                 dev->features |= NETIF_F_IP_CSUM;
6662         else
6663                 dev->features &= ~NETIF_F_IP_CSUM;
6664
6665         return 0;
6666 }
6667
6668 static int tg3_get_stats_count (struct net_device *dev)
6669 {
6670         return TG3_NUM_STATS;
6671 }
6672
6673 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6674 {
6675         switch (stringset) {
6676         case ETH_SS_STATS:
6677                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6678                 break;
6679         default:
6680                 WARN_ON(1);     /* we need a WARN() */
6681                 break;
6682         }
6683 }
6684
6685 static void tg3_get_ethtool_stats (struct net_device *dev,
6686                                    struct ethtool_stats *estats, u64 *tmp_stats)
6687 {
6688         struct tg3 *tp = netdev_priv(dev);
6689         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6690 }
6691
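/* Standard MII ioctls.  SIOCGMIIREG and SIOCSMIIREG access the PHY under
 * tp->lock; SERDES boards have no MII PHY, so those requests fall through
 * to -EOPNOTSUPP.
 */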
6692 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6693 {
6694         struct mii_ioctl_data *data = if_mii(ifr);
6695         struct tg3 *tp = netdev_priv(dev);
6696         int err;
6697
6698         switch(cmd) {
6699         case SIOCGMIIPHY:
6700                 data->phy_id = PHY_ADDR;
6701
6702                 /* fallthru */
6703         case SIOCGMIIREG: {
6704                 u32 mii_regval;
6705
6706                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6707                         break;                  /* We have no PHY */
6708
6709                 spin_lock_irq(&tp->lock);
6710                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6711                 spin_unlock_irq(&tp->lock);
6712
6713                 data->val_out = mii_regval;
6714
6715                 return err;
6716         }
6717
6718         case SIOCSMIIREG:
6719                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6720                         break;                  /* We have no PHY */
6721
6722                 if (!capable(CAP_NET_ADMIN))
6723                         return -EPERM;
6724
6725                 spin_lock_irq(&tp->lock);
6726                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6727                 spin_unlock_irq(&tp->lock);
6728
6729                 return err;
6730
6731         default:
6732                 /* do nothing */
6733                 break;
6734         }
6735         return -EOPNOTSUPP;
6736 }
6737
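/* VLAN acceleration hooks: record the vlan_group and let __tg3_set_rx_mode()
 * update the RX_MODE_KEEP_VLAN_TAG setting to match.
 */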
6738 #if TG3_VLAN_TAG_USED
6739 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6740 {
6741         struct tg3 *tp = netdev_priv(dev);
6742
6743         spin_lock_irq(&tp->lock);
6744         spin_lock(&tp->tx_lock);
6745
6746         tp->vlgrp = grp;
6747
6748         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6749         __tg3_set_rx_mode(dev);
6750
6751         spin_unlock(&tp->tx_lock);
6752         spin_unlock_irq(&tp->lock);
6753 }
6754
6755 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6756 {
6757         struct tg3 *tp = netdev_priv(dev);
6758
6759         spin_lock_irq(&tp->lock);
6760         spin_lock(&tp->tx_lock);
6761         if (tp->vlgrp)
6762                 tp->vlgrp->vlan_devices[vid] = NULL;
6763         spin_unlock(&tp->tx_lock);
6764         spin_unlock_irq(&tp->lock);
6765 }
6766 #endif
6767
6768 static struct ethtool_ops tg3_ethtool_ops = {
6769         .get_settings           = tg3_get_settings,
6770         .set_settings           = tg3_set_settings,
6771         .get_drvinfo            = tg3_get_drvinfo,
6772         .get_regs_len           = tg3_get_regs_len,
6773         .get_regs               = tg3_get_regs,
6774         .get_wol                = tg3_get_wol,
6775         .set_wol                = tg3_set_wol,
6776         .get_msglevel           = tg3_get_msglevel,
6777         .set_msglevel           = tg3_set_msglevel,
6778         .nway_reset             = tg3_nway_reset,
6779         .get_link               = ethtool_op_get_link,
6780         .get_eeprom_len         = tg3_get_eeprom_len,
6781         .get_eeprom             = tg3_get_eeprom,
6782         .get_ringparam          = tg3_get_ringparam,
6783         .set_ringparam          = tg3_set_ringparam,
6784         .get_pauseparam         = tg3_get_pauseparam,
6785         .set_pauseparam         = tg3_set_pauseparam,
6786         .get_rx_csum            = tg3_get_rx_csum,
6787         .set_rx_csum            = tg3_set_rx_csum,
6788         .get_tx_csum            = ethtool_op_get_tx_csum,
6789         .set_tx_csum            = tg3_set_tx_csum,
6790         .get_sg                 = ethtool_op_get_sg,
6791         .set_sg                 = ethtool_op_set_sg,
6792 #if TG3_TSO_SUPPORT != 0
6793         .get_tso                = ethtool_op_get_tso,
6794         .set_tso                = tg3_set_tso,
6795 #endif
6796         .get_strings            = tg3_get_strings,
6797         .get_stats_count        = tg3_get_stats_count,
6798         .get_ethtool_stats      = tg3_get_ethtool_stats,
6799 };
6800
6801 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6802 static void __devinit tg3_nvram_init(struct tg3 *tp)
6803 {
6804         int j;
6805
6806         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
6807                 return;
6808
6809         tw32_f(GRC_EEPROM_ADDR,
6810              (EEPROM_ADDR_FSM_RESET |
6811               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6812                EEPROM_ADDR_CLKPERD_SHIFT)));
6813
6814         /* XXX schedule_timeout() ... */
6815         for (j = 0; j < 100; j++)
6816                 udelay(10);
6817
6818         /* Enable seeprom accesses. */
6819         tw32_f(GRC_LOCAL_CTRL,
6820              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6821         udelay(100);
6822
6823         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6824             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6825                 u32 nvcfg1;
6826
6827                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6828                         u32 nvaccess = tr32(NVRAM_ACCESS);
6829
6830                         tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6831                 }
6832
6833                 nvcfg1 = tr32(NVRAM_CFG1);
6834
6835                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6836                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6837                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6838                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6839                 } else {
6840                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6841                         tw32(NVRAM_CFG1, nvcfg1);
6842                 }
6843
6844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6845                         u32 nvaccess = tr32(NVRAM_ACCESS);
6846
6847                         tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6848                 }
6849         } else {
6850                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6851         }
6852 }
6853
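/* Read one aligned 32-bit word from the serial EEPROM through the GRC
 * EEPROM interface: program the address, start the transaction, poll for
 * EEPROM_ADDR_COMPLETE, then fetch the data register.
 */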
6854 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6855                                                  u32 offset, u32 *val)
6856 {
6857         u32 tmp;
6858         int i;
6859
6860         if (offset > EEPROM_ADDR_ADDR_MASK ||
6861             (offset % 4) != 0)
6862                 return -EINVAL;
6863
6864         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6865                                         EEPROM_ADDR_DEVID_MASK |
6866                                         EEPROM_ADDR_READ);
6867         tw32(GRC_EEPROM_ADDR,
6868              tmp |
6869              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6870              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6871               EEPROM_ADDR_ADDR_MASK) |
6872              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6873
6874         for (i = 0; i < 10000; i++) {
6875                 tmp = tr32(GRC_EEPROM_ADDR);
6876
6877                 if (tmp & EEPROM_ADDR_COMPLETE)
6878                         break;
6879                 udelay(100);
6880         }
6881         if (!(tmp & EEPROM_ADDR_COMPLETE))
6882                 return -EBUSY;
6883
6884         *val = tr32(GRC_EEPROM_DATA);
6885         return 0;
6886 }
6887
6888 static int __devinit tg3_nvram_read(struct tg3 *tp,
6889                                     u32 offset, u32 *val)
6890 {
6891         int i;
6892
6893         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
6894                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
6895                 return -EINVAL;
6896         }
6897
6898         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6899                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6900
6901         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6902                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6903                           NVRAM_BUFFERED_PAGE_POS) +
6904                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6905
6906         if (offset > NVRAM_ADDR_MSK)
6907                 return -EINVAL;
6908
6909         tg3_nvram_lock(tp);
6910
6911         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6912                 u32 nvaccess = tr32(NVRAM_ACCESS);
6913
6914                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6915         }
6916
6917         tw32(NVRAM_ADDR, offset);
6918         tw32(NVRAM_CMD,
6919              NVRAM_CMD_RD | NVRAM_CMD_GO |
6920              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6921
6922         /* Wait for done bit to clear. */
6923         for (i = 0; i < 1000; i++) {
6924                 udelay(10);
6925                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6926                         udelay(10);
6927                         *val = swab32(tr32(NVRAM_RDDATA));
6928                         break;
6929                 }
6930         }
6931
6932         tg3_nvram_unlock(tp);
6933
6934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6935                 u32 nvaccess = tr32(NVRAM_ACCESS);
6936
6937                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6938         }
6939
6940         if (i >= 1000)
6941                 return -EBUSY;
6942
6943         return 0;
6944 }
6945
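/* Fallback mapping from PCI subsystem IDs to PHY IDs, used by
 * tg3_phy_probe() when neither the MII registers nor the eeprom
 * configuration yield a usable PHY ID.
 */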
6946 struct subsys_tbl_ent {
6947         u16 subsys_vendor, subsys_devid;
6948         u32 phy_id;
6949 };
6950
6951 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6952         /* Broadcom boards. */
6953         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6954         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6955         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6956         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
6957         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6958         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6959         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
6960         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6961         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6962         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6963         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6964
6965         /* 3com boards. */
6966         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6967         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6968         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
6969         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6970         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6971
6972         /* DELL boards. */
6973         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6974         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6975         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6976         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6977
6978         /* Compaq boards. */
6979         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6980         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6981         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
6982         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6983         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6984
6985         /* IBM boards. */
6986         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
6987 };
6988
6989 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
6990 {
6991         int i;
6992
6993         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6994                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6995                      tp->pdev->subsystem_vendor) &&
6996                     (subsys_id_to_phy_id[i].subsys_devid ==
6997                      tp->pdev->subsystem_device))
6998                         return &subsys_id_to_phy_id[i];
6999         }
7000         return NULL;
7001 }
7002
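/* Determine the PHY type.  Preference order: the ID read from the MII
 * PHYSID registers (skipped when ASF firmware owns the PHY), then the ID
 * recorded in the NIC SRAM/eeprom config area, then the hard-coded
 * subsystem-ID table above.
 */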
7003 static int __devinit tg3_phy_probe(struct tg3 *tp)
7004 {
7005         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7006         u32 hw_phy_id, hw_phy_id_masked;
7007         u32 val;
7008         int eeprom_signature_found, eeprom_phy_serdes, err;
7009
7010         tp->phy_id = PHY_ID_INVALID;
7011         eeprom_phy_id = PHY_ID_INVALID;
7012         eeprom_phy_serdes = 0;
7013         eeprom_signature_found = 0;
7014         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7015         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7016                 u32 nic_cfg, led_cfg;
7017                 u32 nic_phy_id, cfg2;
7018
7019                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7020                 tp->nic_sram_data_cfg = nic_cfg;
7021
7022                 eeprom_signature_found = 1;
7023
7024                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7025                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7026                         eeprom_phy_serdes = 1;
7027
7028                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7029                 if (nic_phy_id != 0) {
7030                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7031                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7032
7033                         eeprom_phy_id  = (id1 >> 16) << 10;
7034                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7035                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7036                 } else
7037                         eeprom_phy_id = 0;
7038
7039                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7040                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
7041                         led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7042                                     SHASTA_EXT_LED_MODE_MASK);
7043                 } else
7044                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7045
7046                 switch (led_cfg) {
7047                 default:
7048                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7049                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7050                         break;
7051
7052                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7053                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7054                         break;
7055
7056                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7057                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7058                         break;
7059
7060                 case SHASTA_EXT_LED_SHARED:
7061                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7062                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7063                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7064                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7065                                                  LED_CTRL_MODE_PHY_2);
7066                         break;
7067
7068                 case SHASTA_EXT_LED_MAC:
7069                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7070                         break;
7071
7072                 case SHASTA_EXT_LED_COMBO:
7073                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7074                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7075                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7076                                                  LED_CTRL_MODE_PHY_2);
7077                         break;
7078
7079                 }
7080
7081                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7082                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7083                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7084                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7085
7086                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7087                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7088                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7089                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7090                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7091
7092                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7093                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7094                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7095                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7096                 }
7097                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7098                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7099
7100                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &cfg2);
7101                 if (cfg2 & (1 << 17))
7102                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7103         }
7104
7105         /* Reading the PHY ID register can conflict with ASF
7106          * firmware access to the PHY hardware.
7107          */
7108         err = 0;
7109         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7110                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7111         } else {
7112                 /* Now read the physical PHY_ID from the chip and verify
7113                  * that it is sane.  If it doesn't look good, we fall back
7114                  * to the PHY_ID found in the eeprom area and, failing
7115                  * that, to the hard-coded subsystem-ID table.
7116                  */
7117                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7118                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7119
7120                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7121                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7122                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7123
7124                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7125         }
7126
7127         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7128                 tp->phy_id = hw_phy_id;
7129                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7130                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7131         } else {
7132                 if (eeprom_signature_found) {
7133                         tp->phy_id = eeprom_phy_id;
7134                         if (eeprom_phy_serdes)
7135                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7136                 } else {
7137                         struct subsys_tbl_ent *p;
7138
7139                         /* No eeprom signature?  Try the hardcoded
7140                          * subsys device table.
7141                          */
7142                         p = lookup_by_subsys(tp);
7143                         if (!p)
7144                                 return -ENODEV;
7145
7146                         tp->phy_id = p->phy_id;
7147                         if (!tp->phy_id ||
7148                             tp->phy_id == PHY_ID_BCM8002)
7149                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7150                 }
7151         }
7152
7153         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7154             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7155                 u32 bmsr, adv_reg, tg3_ctrl;
7156
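                /* BMSR latches link-down events; read it twice so the
                 * second value reflects the current link state.
                 */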
7157                 tg3_readphy(tp, MII_BMSR, &bmsr);
7158                 tg3_readphy(tp, MII_BMSR, &bmsr);
7159
7160                 if (bmsr & BMSR_LSTATUS)
7161                         goto skip_phy_reset;
7162                     
7163                 err = tg3_phy_reset(tp);
7164                 if (err)
7165                         return err;
7166
7167                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7168                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7169                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7170                 tg3_ctrl = 0;
7171                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7172                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7173                                     MII_TG3_CTRL_ADV_1000_FULL);
7174                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7175                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7176                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7177                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7178                 }
7179
7180                 if (!tg3_copper_is_advertising_all(tp)) {
7181                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7182
7183                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7184                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7185
7186                         tg3_writephy(tp, MII_BMCR,
7187                                      BMCR_ANENABLE | BMCR_ANRESTART);
7188                 }
7189                 tg3_phy_set_wirespeed(tp);
7190
7191                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7192                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7193                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7194         }
7195
7196 skip_phy_reset:
7197         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7198                 err = tg3_init_5401phy_dsp(tp);
7199                 if (err)
7200                         return err;
7201         }
7202
7203         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7204                 err = tg3_init_5401phy_dsp(tp);
7205         }
7206
7207         if (!eeprom_signature_found)
7208                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7209
7210         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7211                 tp->link_config.advertising =
7212                         (ADVERTISED_1000baseT_Half |
7213                          ADVERTISED_1000baseT_Full |
7214                          ADVERTISED_Autoneg |
7215                          ADVERTISED_FIBRE);
7216         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7217                 tp->link_config.advertising &=
7218                         ~(ADVERTISED_1000baseT_Half |
7219                           ADVERTISED_1000baseT_Full);
7220
7221         return err;
7222 }
7223
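/* Extract the board part number from the VPD area in NVRAM: copy 256 bytes
 * starting at offset 0x100, then walk the VPD structures looking for the
 * read-only "PN" keyword.
 */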
7224 static void __devinit tg3_read_partno(struct tg3 *tp)
7225 {
7226         unsigned char vpd_data[256];
7227         int i;
7228
7229         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7230                 /* Sun decided not to put the necessary bits in the
7231                  * NVRAM of their onboard tg3 parts :(
7232                  */
7233                 strcpy(tp->board_part_number, "Sun 570X");
7234                 return;
7235         }
7236
7237         for (i = 0; i < 256; i += 4) {
7238                 u32 tmp;
7239
7240                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7241                         goto out_not_found;
7242
7243                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7244                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7245                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7246                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7247         }
7248
7249         /* Now parse and find the part number. */
7250         for (i = 0; i < 256; ) {
7251                 unsigned char val = vpd_data[i];
7252                 int block_end;
7253
7254                 if (val == 0x82 || val == 0x91) {
7255                         i = (i + 3 +
7256                              (vpd_data[i + 1] +
7257                               (vpd_data[i + 2] << 8)));
7258                         continue;
7259                 }
7260
7261                 if (val != 0x90)
7262                         goto out_not_found;
7263
7264                 block_end = (i + 3 +
7265                              (vpd_data[i + 1] +
7266                               (vpd_data[i + 2] << 8)));
7267                 i += 3;
7268                 while (i < block_end) {
7269                         if (vpd_data[i + 0] == 'P' &&
7270                             vpd_data[i + 1] == 'N') {
7271                                 int partno_len = vpd_data[i + 2];
7272
7273                                 if (partno_len > 24)
7274                                         goto out_not_found;
7275
7276                                 memcpy(tp->board_part_number,
7277                                        &vpd_data[i + 3],
7278                                        partno_len);
7279
7280                                 /* Success. */
7281                                 return;
7282                         }

                        /* Not "PN": skip this keyword's 2-byte name, 1-byte
                         * length and data so the scan advances to the next
                         * VPD keyword instead of looping forever.
                         */
                        i += 3 + vpd_data[i + 2];
7283                 }
7284
7285                 /* Part number not found. */
7286                 goto out_not_found;
7287         }
7288
7289 out_not_found:
7290         strcpy(tp->board_part_number, "none");
7291 }
7292
7293 #ifdef CONFIG_SPARC64
7294 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7295 {
7296         struct pci_dev *pdev = tp->pdev;
7297         struct pcidev_cookie *pcp = pdev->sysdata;
7298
7299         if (pcp != NULL) {
7300                 int node = pcp->prom_node;
7301                 u32 venid;
7302                 int err;
7303
7304                 err = prom_getproperty(node, "subsystem-vendor-id",
7305                                        (char *) &venid, sizeof(venid));
7306                 if (err == 0 || err == -1)
7307                         return 0;
7308                 if (venid == PCI_VENDOR_ID_SUN)
7309                         return 1;
7310         }
7311         return 0;
7312 }
7313 #endif
7314
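/* One-time probe of chip revision, bus mode (PCI, PCI-X, PCI Express) and
 * the various hardware bug workarounds; the results are recorded in
 * tp->tg3_flags and tp->tg3_flags2 before the device is brought up.
 */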
7315 static int __devinit tg3_get_invariants(struct tg3 *tp)
7316 {
7317         u32 misc_ctrl_reg;
7318         u32 cacheline_sz_reg;
7319         u32 pci_state_reg, grc_misc_cfg;
7320         u32 val;
7321         u16 pci_cmd;
7322         int err;
7323
7324 #ifdef CONFIG_SPARC64
7325         if (tg3_is_sun_570X(tp))
7326                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7327 #endif
7328
7329         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7330          * reordering to the mailbox registers done by the host
7331          * controller can cause major troubles.  We read back from
7332          * every mailbox register write to force the writes to be
7333          * posted to the chip in order.
7334          */
7335         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7336                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7337             pci_find_device(PCI_VENDOR_ID_INTEL,
7338                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7339             pci_find_device(PCI_VENDOR_ID_INTEL,
7340                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7341             pci_find_device(PCI_VENDOR_ID_INTEL,
7342                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7343             pci_find_device(PCI_VENDOR_ID_AMD,
7344                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7345                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7346
7347         /* Force memory write invalidate off.  If we leave it on,
7348          * then on 5700_BX chips we have to enable a workaround.
7349          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7350          * to match the cacheline size.  The Broadcom driver has this
7351          * workaround but turns MWI off all the time and so never uses
7352          * it.  This seems to suggest that the workaround is insufficient.
7353          */
7354         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7355         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7356         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7357
7358         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7359          * has the register indirect write enable bit set before
7360          * we try to access any of the MMIO registers.  It is also
7361          * critical that the PCI-X hw workaround situation is decided
7362          * before that as well.
7363          */
7364         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7365                               &misc_ctrl_reg);
7366
7367         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7368                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7369
7370         /* Initialize misc host control in PCI block. */
7371         tp->misc_host_ctrl |= (misc_ctrl_reg &
7372                                MISC_HOST_CTRL_CHIPREV);
7373         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7374                                tp->misc_host_ctrl);
7375
7376         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7377                               &cacheline_sz_reg);
7378
7379         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7380         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7381         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7382         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7383
7384         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7385                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7386
7387         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7388             tp->pci_lat_timer < 64) {
7389                 tp->pci_lat_timer = 64;
7390
7391                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7392                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7393                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7394                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7395
7396                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7397                                        cacheline_sz_reg);
7398         }
7399
7400         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7401                               &pci_state_reg);
7402
7403         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7404                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7405
7406                 /* If this is a 5700 BX chipset, and we are in PCI-X
7407                  * mode, enable register write workaround.
7408                  *
7409                  * The workaround is to use indirect register accesses
7410                  * for all chip writes not to mailbox registers.
7411                  */
7412                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7413                         u32 pm_reg;
7414                         u16 pci_cmd;
7415
7416                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7417
7418                         /* The chip can have its power management PCI config
7419                          * space registers clobbered due to this bug.
7420                          * So explicitly force the chip into D0 here.
7421                          */
7422                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7423                                               &pm_reg);
7424                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7425                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7426                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7427                                                pm_reg);
7428
7429                         /* Also, force SERR#/PERR# in PCI command. */
7430                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7431                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7432                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7433                 }
7434         }
7435
7436         /* Back to back register writes can cause problems on this chip,
7437          * the workaround is to read back all reg writes except those to
7438          * mailbox regs.  See tg3_write_indirect_reg32().
7439          *
7440          * PCI Express 5750_A0 rev chips need this workaround too.
7441          */
7442         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7443             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7444              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7445                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7446
7447         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7448                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7449         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7450                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7451
7452         /* Chip-specific fixup from Broadcom driver */
7453         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7454             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7455                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7456                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7457         }
7458
7459         /* Force the chip into D0. */
7460         err = tg3_set_power_state(tp, 0);
7461         if (err) {
7462                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7463                        pci_name(tp->pdev));
7464                 return err;
7465         }
7466
7467         /* 5700 B0 chips do not support checksumming correctly due
7468          * to hardware bugs.
7469          */
7470         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7471                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7472
7473         /* Pseudo-header checksum is done by hardware logic and not
7474          * the offload processors, so make the chip do the pseudo-
7475          * header checksums on receive.  For transmit it is more
7476          * convenient to do the pseudo-header checksum in software
7477          * as Linux does that on transmit for us in all cases.
7478          */
7479         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7480         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7481
7482         /* Derive initial jumbo mode from MTU assigned in
7483          * ether_setup() via the alloc_etherdev() call
7484          */
7485         if (tp->dev->mtu > ETH_DATA_LEN)
7486                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7487
7488         /* Determine WakeOnLan speed to use. */
7489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7490             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7491             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7492             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7493                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7494         } else {
7495                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7496         }
7497
7498         /* A few boards don't want Ethernet@WireSpeed phy feature */
7499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7500             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7501              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7502              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7503                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7504
7505         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7506             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7507                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7508         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7509                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7510
7511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7513                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7514
7515         /* Only 5701 and later support tagged irq status mode.
7516          * Also, 5788 chips cannot use tagged irq status.
7517          *
7518          * However, since we are using NAPI, avoid tagged irq status
7519          * because the interrupt condition is more difficult to
7520          * fully clear in that mode.
7521          */
7522         tp->coalesce_mode = 0;
7523
7524         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7525             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7526                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7527
7528         /* Initialize MAC MI mode, polling disabled. */
7529         tw32_f(MAC_MI_MODE, tp->mi_mode);
7530         udelay(80);
7531
7532         /* Initialize data/descriptor byte/word swapping. */
7533         val = tr32(GRC_MODE);
7534         val &= GRC_MODE_HOST_STACKUP;
7535         tw32(GRC_MODE, val | tp->grc_mode);
7536
7537         tg3_switch_clocks(tp);
7538
7539         /* Clear this out for sanity. */
7540         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7541
7542         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7543                               &pci_state_reg);
7544         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7545             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7546                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7547
7548                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7549                     chiprevid == CHIPREV_ID_5701_B0 ||
7550                     chiprevid == CHIPREV_ID_5701_B2 ||
7551                     chiprevid == CHIPREV_ID_5701_B5) {
7552                         void __iomem *sram_base;
7553
7554                         /* Write some dummy words into the SRAM status block
7555                          * area, see if it reads back correctly.  If the return
7556                          * value is bad, force enable the PCIX workaround.
7557                          */
7558                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7559
7560                         writel(0x00000000, sram_base);
7561                         writel(0x00000000, sram_base + 4);
7562                         writel(0xffffffff, sram_base + 4);
7563                         if (readl(sram_base) != 0x00000000)
7564                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7565                 }
7566         }
7567
7568         udelay(50);
7569         tg3_nvram_init(tp);
7570
7571         grc_misc_cfg = tr32(GRC_MISC_CFG);
7572         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7573
7574         /* Broadcom's driver says that CIOBE multisplit has a bug */
7575 #if 0
7576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7577             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7578                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7579                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7580         }
7581 #endif
7582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7583             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7584              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7585                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7586
7587         /* these are limited to 10/100 only */
7588         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7589              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7590             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7591              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7592              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7593               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7594               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7595             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7596              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
7597                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7598
7599         err = tg3_phy_probe(tp);
7600         if (err) {
7601                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7602                        pci_name(tp->pdev), err);
7603                 /* ... but do not return immediately ... */
7604         }
7605
7606         tg3_read_partno(tp);
7607
7608         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7609                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7610         } else {
7611                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7612                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7613                 else
7614                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7615         }
7616
7617         /* 5700 {AX,BX} chips have a broken status block link
7618          * change bit implementation, so we must use the
7619          * status register in those cases.
7620          */
7621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7622                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7623         else
7624                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7625
7626         /* The led_ctrl is set during tg3_phy_probe; here we might
7627          * have to force the link status polling mechanism based
7628          * upon subsystem IDs.
7629          */
7630         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7631             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7632                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7633                                   TG3_FLAG_USE_LINKCHG_REG);
7634         }
7635
7636         /* For all SERDES we poll the MAC status register. */
7637         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7638                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7639         else
7640                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7641
7642         /* 5700 BX chips need to have their TX producer index mailboxes
7643          * written twice to work around a bug.
7644          */
7645         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7646                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7647         else
7648                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7649
7650         /* It seems all chips can get confused if TX buffers
7651          * straddle the 4GB address boundary in some cases.
7652          */
7653         tp->dev->hard_start_xmit = tg3_start_xmit;
7654
7655         tp->rx_offset = 2;
7656         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7657             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7658                 tp->rx_offset = 0;
7659
7660         /* By default, disable wake-on-lan.  User can change this
7661          * using ETHTOOL_SWOL.
7662          */
7663         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7664
7665         return err;
7666 }
7667
7668 #ifdef CONFIG_SPARC64
7669 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7670 {
7671         struct net_device *dev = tp->dev;
7672         struct pci_dev *pdev = tp->pdev;
7673         struct pcidev_cookie *pcp = pdev->sysdata;
7674
7675         if (pcp != NULL) {
7676                 int node = pcp->prom_node;
7677
7678                 if (prom_getproplen(node, "local-mac-address") == 6) {
7679                         prom_getproperty(node, "local-mac-address",
7680                                          dev->dev_addr, 6);
7681                         return 0;
7682                 }
7683         }
7684         return -ENODEV;
7685 }
7686
7687 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7688 {
7689         struct net_device *dev = tp->dev;
7690
7691         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7692         return 0;
7693 }
7694 #endif
7695
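/* MAC address discovery, in order: SPARC PROM property, the NIC SRAM
 * address mailbox (tagged 0x484b, ASCII "HK"), NVRAM at mac_offset, and
 * finally the MAC_ADDR_0_HIGH/LOW registers.
 */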
7696 static int __devinit tg3_get_device_address(struct tg3 *tp)
7697 {
7698         struct net_device *dev = tp->dev;
7699         u32 hi, lo, mac_offset;
7700
7701 #ifdef CONFIG_SPARC64
7702         if (!tg3_get_macaddr_sparc(tp))
7703                 return 0;
7704 #endif
7705
7706         mac_offset = 0x7c;
7707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7708             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
7709                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7710                         mac_offset = 0xcc;
7711                 if (tg3_nvram_lock(tp))
7712                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7713                 else
7714                         tg3_nvram_unlock(tp);
7715         }
7716
7717         /* First try to get it from MAC address mailbox. */
7718         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
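              /* 0x484b is ASCII "HK"; the firmware appears to use it as a
               * signature marking a valid MAC address in this mailbox.
               */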
7719         if ((hi >> 16) == 0x484b) {
7720                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7721                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7722
7723                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7724                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7725                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7726                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7727                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7728         }
7729         /* Next, try NVRAM. */
7730         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
7731                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7732                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7733                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7734                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7735                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7736                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7737                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7738                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7739         }
7740         /* Finally just fetch it out of the MAC control regs. */
7741         else {
7742                 hi = tr32(MAC_ADDR_0_HIGH);
7743                 lo = tr32(MAC_ADDR_0_LOW);
7744
7745                 dev->dev_addr[5] = lo & 0xff;
7746                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7747                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7748                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7749                 dev->dev_addr[1] = hi & 0xff;
7750                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7751         }
7752
7753         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7754 #ifdef CONFIG_SPARC64
7755                 if (!tg3_get_default_macaddr_sparc(tp))
7756                         return 0;
7757 #endif
7758                 return -EINVAL;
7759         }
7760         return 0;
7761 }
7762
7763 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7764 {
7765         struct tg3_internal_buffer_desc test_desc;
7766         u32 sram_dma_descs;
7767         int i, ret;
7768
7769         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7770
7771         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7772         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7773         tw32(RDMAC_STATUS, 0);
7774         tw32(WDMAC_STATUS, 0);
7775
7776         tw32(BUFMGR_MODE, 0);
7777         tw32(FTQ_RESET, 0);
7778
7779         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7780         test_desc.addr_lo = buf_dma & 0xffffffff;
7781         test_desc.nic_mbuf = 0x00002100;
7782         test_desc.len = size;
7783
7784         /*
7785          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7786          * the *second* time the tg3 driver was getting loaded after an
7787          * initial scan.
7788          *
7789          * Broadcom tells me:
7790          *   ...the DMA engine is connected to the GRC block and a DMA
7791          *   reset may affect the GRC block in some unpredictable way...
7792          *   The behavior of resets to individual blocks has not been tested.
7793          *
7794          * Broadcom noted the GRC reset will also reset all sub-components.
7795          */
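              /* Program the descriptor's queue IDs and enable the read DMA
               * engine (used when sending data to the device) or the write
               * DMA engine (used when reading it back), depending on the
               * transfer direction.
               */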
7796         if (to_device) {
7797                 test_desc.cqid_sqid = (13 << 8) | 2;
7798
7799                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7800                 udelay(40);
7801         } else {
7802                 test_desc.cqid_sqid = (16 << 8) | 7;
7803
7804                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7805                 udelay(40);
7806         }
7807         test_desc.flags = 0x00000005;
7808
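              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI memory-window config registers.
               */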
7809         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7810                 u32 val;
7811
7812                 val = *(((u32 *)&test_desc) + i);
7813                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7814                                        sram_dma_descs + (i * sizeof(u32)));
7815                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7816         }
7817         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7818
7819         if (to_device) {
7820                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7821         } else {
7822                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7823         }
7824
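              /* Assume failure, then poll the completion FIFO for up to
               * 4 ms (40 iterations x 100 us) for the descriptor to complete.
               */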
7825         ret = -ENODEV;
7826         for (i = 0; i < 40; i++) {
7827                 u32 val;
7828
7829                 if (to_device)
7830                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7831                 else
7832                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7833                 if ((val & 0xffff) == sram_dma_descs) {
7834                         ret = 0;
7835                         break;
7836                 }
7837
7838                 udelay(100);
7839         }
7840
7841         return ret;
7842 }
7843
7844 #define TEST_BUFFER_SIZE        0x400
7845
7846 static int __devinit tg3_test_dma(struct tg3 *tp)
7847 {
7848         dma_addr_t buf_dma;
7849         u32 *buf;
7850         int ret;
7851
7852         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7853         if (!buf) {
7854                 ret = -ENOMEM;
7855                 goto out_nofree;
7856         }
7857
7858         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7859                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7860
7861 #ifndef CONFIG_X86
7862         {
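                      /* Derive a DMA write boundary setting from the PCI
                       * cache line size; x86 keeps the default set above.
                       */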
7863                 u8 byte;
7864                 int cacheline_size;
7865                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7866
7867                 if (byte == 0)
7868                         cacheline_size = 1024;
7869                 else
7870                         cacheline_size = (int) byte * 4;
7871
7872                 switch (cacheline_size) {
7873                 case 16:
7874                 case 32:
7875                 case 64:
7876                 case 128:
7877                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7878                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7879                                 tp->dma_rwctrl |=
7880                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7881                                 break;
7882                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7883                                 tp->dma_rwctrl &=
7884                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
7885                                 tp->dma_rwctrl |=
7886                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7887                                 break;
7888                         }
7889                         /* fallthrough */
7890                 case 256:
7891                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7892                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7893                                 tp->dma_rwctrl |=
7894                                         DMA_RWCTRL_WRITE_BNDRY_256;
7895                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7896                                 tp->dma_rwctrl |=
7897                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7898                 }
7899         }
7900 #endif
7901
7902         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7903                 tp->dma_rwctrl |= 0x001f0000;
7904         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7905                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7906                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7907                         tp->dma_rwctrl |= 0x003f0000;
7908                 else
7909                         tp->dma_rwctrl |= 0x003f000f;
7910         } else {
7911                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7912                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7913                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7914
7915                         if (ccval == 0x6 || ccval == 0x7)
7916                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7917
7918                         /* Set bit 23 to re-enable the PCI-X hw bug fix. */
7919                         tp->dma_rwctrl |= 0x009f0000;
7920                 } else {
7921                         tp->dma_rwctrl |= 0x001b000f;
7922                 }
7923         }
7924
7925         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7926             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7927                 tp->dma_rwctrl &= 0xfffffff0;
7928
7929         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7930             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7931                 /* Remove this if it causes problems for some boards. */
7932                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7933
7934                 /* On 5700/5701 chips, we need to set this bit.
7935                  * Otherwise the chip will issue cacheline transactions
7936                  * to streamable DMA memory with not all the byte
7937                  * enables turned on.  This is an error on several
7938                  * RISC PCI controllers, in particular sparc64.
7939                  *
7940                  * On 5703/5704 chips, this bit has been reassigned
7941                  * a different meaning.  In particular, it is used
7942                  * on those chips to enable a PCI-X workaround.
7943                  */
7944                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7945         }
7946
7947         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7948
7949 #if 0
7950         /* Unneeded, already done by tg3_get_invariants.  */
7951         tg3_switch_clocks(tp);
7952 #endif
7953
7954         ret = 0;
7955         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7956             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7957                 goto out;
7958
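              /* Fill the buffer with a known pattern, DMA it to the chip,
               * DMA it back, and verify that the data survived the round trip.
               */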
7959         while (1) {
7960                 u32 *p = buf, i;
7961
7962                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7963                         p[i] = i;
7964
7965                 /* Send the buffer to the chip. */
7966                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7967                 if (ret) {
7968                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7969                         break;
7970                 }
7971
7972 #if 0
7973                 /* validate data reached card RAM correctly. */
7974                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7975                         u32 val;
7976                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7977                         if (le32_to_cpu(val) != p[i]) {
7978                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
7979                                 /* ret = -ENODEV here? */
7980                         }
7981                         p[i] = 0;
7982                 }
7983 #endif
7984                 /* Now read it back. */
7985                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7986                 if (ret) {
7987                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
7988
7989                         break;
7990                 }
7991
7992                 /* Verify it. */
7993                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7994                         if (p[i] == i)
7995                                 continue;
7996
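                              /* Data mismatch.  If no write boundary workaround
                               * is active yet, enable the 16-byte write boundary
                               * and rerun the whole test; otherwise give up.
                               */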
7997                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7998                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7999                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8000                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8001                                 break;
8002                         } else {
8003                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8004                                 ret = -ENODEV;
8005                                 goto out;
8006                         }
8007                 }
8008
8009                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8010                         /* Success. */
8011                         ret = 0;
8012                         break;
8013                 }
8014         }
8015
8016 out:
8017         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8018 out_nofree:
8019         return ret;
8020 }
8021
8022 static void __devinit tg3_init_link_config(struct tg3 *tp)
8023 {
8024         tp->link_config.advertising =
8025                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8026                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8027                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8028                  ADVERTISED_Autoneg | ADVERTISED_MII);
8029         tp->link_config.speed = SPEED_INVALID;
8030         tp->link_config.duplex = DUPLEX_INVALID;
8031         tp->link_config.autoneg = AUTONEG_ENABLE;
8032         netif_carrier_off(tp->dev);
8033         tp->link_config.active_speed = SPEED_INVALID;
8034         tp->link_config.active_duplex = DUPLEX_INVALID;
8035         tp->link_config.phy_is_low_power = 0;
8036         tp->link_config.orig_speed = SPEED_INVALID;
8037         tp->link_config.orig_duplex = DUPLEX_INVALID;
8038         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8039 }
8040
8041 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8042 {
8043         tp->bufmgr_config.mbuf_read_dma_low_water =
8044                 DEFAULT_MB_RDMA_LOW_WATER;
8045         tp->bufmgr_config.mbuf_mac_rx_low_water =
8046                 DEFAULT_MB_MACRX_LOW_WATER;
8047         tp->bufmgr_config.mbuf_high_water =
8048                 DEFAULT_MB_HIGH_WATER;
8049
8050         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8051                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8052         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8053                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8054         tp->bufmgr_config.mbuf_high_water_jumbo =
8055                 DEFAULT_MB_HIGH_WATER_JUMBO;
8056
8057         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8058         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8059 }
8060
8061 static char * __devinit tg3_phy_string(struct tg3 *tp)
8062 {
8063         switch (tp->phy_id & PHY_ID_MASK) {
8064         case PHY_ID_BCM5400:    return "5400";
8065         case PHY_ID_BCM5401:    return "5401";
8066         case PHY_ID_BCM5411:    return "5411";
8067         case PHY_ID_BCM5701:    return "5701";
8068         case PHY_ID_BCM5703:    return "5703";
8069         case PHY_ID_BCM5704:    return "5704";
8070         case PHY_ID_BCM5705:    return "5705";
8071         case PHY_ID_BCM5750:    return "5750";
8072         case PHY_ID_BCM8002:    return "8002/serdes";
8073         case 0:                 return "serdes";
8074         default:                return "unknown";
8075         }
8076 }
8077
8078 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8079 {
8080         struct pci_dev *peer;
8081         unsigned int func, devnr = tp->pdev->devfn & ~7;
8082
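              /* The 5704 is a dual-MAC part; the peer is the other PCI
               * function in the same slot.
               */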
8083         for (func = 0; func < 8; func++) {
8084                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8085                 if (peer && peer != tp->pdev)
8086                         break;
8087                 pci_dev_put(peer);
8088         }
8089         if (!peer || peer == tp->pdev)
8090                 BUG();
8091
8092         /*
8093          * We don't need to keep the refcount elevated; there's no way
8094          * to remove one half of this device without removing the other
8095          */
8096         pci_dev_put(peer);
8097
8098         return peer;
8099 }
8100
8101 static int __devinit tg3_init_one(struct pci_dev *pdev,
8102                                   const struct pci_device_id *ent)
8103 {
8104         static int tg3_version_printed = 0;
8105         unsigned long tg3reg_base, tg3reg_len;
8106         struct net_device *dev;
8107         struct tg3 *tp;
8108         int i, err, pci_using_dac, pm_cap;
8109
8110         if (tg3_version_printed++ == 0)
8111                 printk(KERN_INFO "%s", version);
8112
8113         err = pci_enable_device(pdev);
8114         if (err) {
8115                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8116                        "aborting.\n");
8117                 return err;
8118         }
8119
8120         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8121                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8122                        "base address, aborting.\n");
8123                 err = -ENODEV;
8124                 goto err_out_disable_pdev;
8125         }
8126
8127         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8128         if (err) {
8129                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8130                        "aborting.\n");
8131                 goto err_out_disable_pdev;
8132         }
8133
8134         pci_set_master(pdev);
8135
8136         /* Find power-management capability. */
8137         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8138         if (pm_cap == 0) {
8139                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8140                        "aborting.\n");
8141                 err = -EIO;
8142                 goto err_out_free_res;
8143         }
8144
8145         /* Configure DMA attributes. */
8146         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8147         if (!err) {
8148                 pci_using_dac = 1;
8149                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8150                 if (err < 0) {
8151                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8152                                "for consistent allocations\n");
8153                         goto err_out_free_res;
8154                 }
8155         } else {
8156                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8157                 if (err) {
8158                         printk(KERN_ERR PFX "No usable DMA configuration, "
8159                                "aborting.\n");
8160                         goto err_out_free_res;
8161                 }
8162                 pci_using_dac = 0;
8163         }
8164
8165         tg3reg_base = pci_resource_start(pdev, 0);
8166         tg3reg_len = pci_resource_len(pdev, 0);
8167
8168         dev = alloc_etherdev(sizeof(*tp));
8169         if (!dev) {
8170                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8171                 err = -ENOMEM;
8172                 goto err_out_free_res;
8173         }
8174
8175         SET_MODULE_OWNER(dev);
8176         SET_NETDEV_DEV(dev, &pdev->dev);
8177
8178         if (pci_using_dac)
8179                 dev->features |= NETIF_F_HIGHDMA;
8180         dev->features |= NETIF_F_LLTX;
8181 #if TG3_VLAN_TAG_USED
8182         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8183         dev->vlan_rx_register = tg3_vlan_rx_register;
8184         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8185 #endif
8186
8187         tp = netdev_priv(dev);
8188         tp->pdev = pdev;
8189         tp->dev = dev;
8190         tp->pm_cap = pm_cap;
8191         tp->mac_mode = TG3_DEF_MAC_MODE;
8192         tp->rx_mode = TG3_DEF_RX_MODE;
8193         tp->tx_mode = TG3_DEF_TX_MODE;
8194         tp->mi_mode = MAC_MI_MODE_BASE;
8195         if (tg3_debug > 0)
8196                 tp->msg_enable = tg3_debug;
8197         else
8198                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8199
8200         /* The word/byte swap controls here control register access byte
8201          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8202          * setting below.
8203          */
8204         tp->misc_host_ctrl =
8205                 MISC_HOST_CTRL_MASK_PCI_INT |
8206                 MISC_HOST_CTRL_WORD_SWAP |
8207                 MISC_HOST_CTRL_INDIR_ACCESS |
8208                 MISC_HOST_CTRL_PCISTATE_RW;
8209
8210         /* The NONFRM (non-frame) byte/word swap controls take effect
8211          * on descriptor entries, anything which isn't packet data.
8212          *
8213          * The StrongARM chips on the board (one for tx, one for rx)
8214          * are running in big-endian mode.
8215          */
8216         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8217                         GRC_MODE_WSWAP_NONFRM_DATA);
8218 #ifdef __BIG_ENDIAN
8219         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8220 #endif
8221         spin_lock_init(&tp->lock);
8222         spin_lock_init(&tp->tx_lock);
8223         spin_lock_init(&tp->indirect_lock);
8224         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8225
8226         tp->regs = ioremap(tg3reg_base, tg3reg_len);
8227         if (tp->regs == 0UL) {
8228                 printk(KERN_ERR PFX "Cannot map device registers, "
8229                        "aborting.\n");
8230                 err = -ENOMEM;
8231                 goto err_out_free_dev;
8232         }
8233
8234         tg3_init_link_config(tp);
8235
8236         tg3_init_bufmgr_config(tp);
8237
8238         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8239         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8240         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8241
8242         dev->open = tg3_open;
8243         dev->stop = tg3_close;
8244         dev->get_stats = tg3_get_stats;
8245         dev->set_multicast_list = tg3_set_rx_mode;
8246         dev->set_mac_address = tg3_set_mac_addr;
8247         dev->do_ioctl = tg3_ioctl;
8248         dev->tx_timeout = tg3_tx_timeout;
8249         dev->poll = tg3_poll;
8250         dev->ethtool_ops = &tg3_ethtool_ops;
8251         dev->weight = 64;
8252         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8253         dev->change_mtu = tg3_change_mtu;
8254         dev->irq = pdev->irq;
8255 #ifdef CONFIG_NET_POLL_CONTROLLER
8256         dev->poll_controller = tg3_poll_controller;
8257 #endif
8258
8259         err = tg3_get_invariants(tp);
8260         if (err) {
8261                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8262                        "aborting.\n");
8263                 goto err_out_iounmap;
8264         }
8265
8266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8267             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8268                 tp->bufmgr_config.mbuf_read_dma_low_water =
8269                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8270                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8271                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8272                 tp->bufmgr_config.mbuf_high_water =
8273                         DEFAULT_MB_HIGH_WATER_5705;
8274         }
8275
8276 #if TG3_TSO_SUPPORT != 0
8277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8278             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8279             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8280             ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8281              GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8282                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8283         } else {
8284                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8285         }
8286
8287         /* TSO is off by default, user can enable using ethtool.  */
8288 #if 0
8289         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8290                 dev->features |= NETIF_F_TSO;
8291 #endif
8292
8293 #endif
8294
8295         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8296             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8297             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8298                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8299                 tp->rx_pending = 63;
8300         }
8301
8302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8303                 tp->pdev_peer = tg3_find_5704_peer(tp);
8304
8305         err = tg3_get_device_address(tp);
8306         if (err) {
8307                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8308                        "aborting.\n");
8309                 goto err_out_iounmap;
8310         }
8311
8312         /*
8313          * Reset the chip in case a UNDI or EFI driver did not shut down
8314          * DMA.  The DMA self test will enable WDMAC and we would otherwise
8315          * see (spurious) pending DMA on the PCI bus at that point.
8316          */
8317         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8318             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8319                 pci_save_state(tp->pdev, tp->pci_cfg_state);
8320                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8321                 tg3_halt(tp);
8322         }
8323
8324         err = tg3_test_dma(tp);
8325         if (err) {
8326                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8327                 goto err_out_iounmap;
8328         }
8329
8330         /* Tigon3 can do ipv4 only... and some chips have buggy
8331          * checksumming.
8332          */
8333         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8334                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8335                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8336         } else
8337                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8338
8339         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8340                 dev->features &= ~NETIF_F_HIGHDMA;
8341
8342         /* flow control autonegotiation is default behavior */
8343         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8344
8345         err = register_netdev(dev);
8346         if (err) {
8347                 printk(KERN_ERR PFX "Cannot register net device, "
8348                        "aborting.\n");
8349                 goto err_out_iounmap;
8350         }
8351
8352         pci_set_drvdata(pdev, dev);
8353
8354         /* Now that we have fully set up the chip, save away a snapshot
8355          * of the PCI config space.  We need to restore this after
8356          * GRC_MISC_CFG core clock resets and some resume events.
8357          */
8358         pci_save_state(tp->pdev, tp->pci_cfg_state);
8359
8360         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8361                dev->name,
8362                tp->board_part_number,
8363                tp->pci_chip_rev_id,
8364                tg3_phy_string(tp),
8365                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8366                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8367                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8368                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8369                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8370                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8371
8372         for (i = 0; i < 6; i++)
8373                 printk("%2.2x%c", dev->dev_addr[i],
8374                        i == 5 ? '\n' : ':');
8375
8376         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8377                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8378                "TSOcap[%d] \n",
8379                dev->name,
8380                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8381                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8382                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8383                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8384                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8385                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8386                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8387
8388         return 0;
8389
8390 err_out_iounmap:
8391         iounmap(tp->regs);
8392
8393 err_out_free_dev:
8394         free_netdev(dev);
8395
8396 err_out_free_res:
8397         pci_release_regions(pdev);
8398
8399 err_out_disable_pdev:
8400         pci_disable_device(pdev);
8401         pci_set_drvdata(pdev, NULL);
8402         return err;
8403 }
8404
8405 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8406 {
8407         struct net_device *dev = pci_get_drvdata(pdev);
8408
8409         if (dev) {
8410                 struct tg3 *tp = netdev_priv(dev);
8411
8412                 unregister_netdev(dev);
8413                 iounmap(tp->regs);
8414                 free_netdev(dev);
8415                 pci_release_regions(pdev);
8416                 pci_disable_device(pdev);
8417                 pci_set_drvdata(pdev, NULL);
8418         }
8419 }
8420
8421 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8422 {
8423         struct net_device *dev = pci_get_drvdata(pdev);
8424         struct tg3 *tp = netdev_priv(dev);
8425         int err;
8426
8427         if (!netif_running(dev))
8428                 return 0;
8429
8430         tg3_netif_stop(tp);
8431
8432         del_timer_sync(&tp->timer);
8433
8434         spin_lock_irq(&tp->lock);
8435         spin_lock(&tp->tx_lock);
8436         tg3_disable_ints(tp);
8437         spin_unlock(&tp->tx_lock);
8438         spin_unlock_irq(&tp->lock);
8439
8440         netif_device_detach(dev);
8441
8442         spin_lock_irq(&tp->lock);
8443         spin_lock(&tp->tx_lock);
8444         tg3_halt(tp);
8445         spin_unlock(&tp->tx_lock);
8446         spin_unlock_irq(&tp->lock);
8447
8448         err = tg3_set_power_state(tp, state);
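              /* If the power state change failed, bring the hardware back up
               * so the interface keeps working in its current state.
               */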
8449         if (err) {
8450                 spin_lock_irq(&tp->lock);
8451                 spin_lock(&tp->tx_lock);
8452
8453                 tg3_init_hw(tp);
8454
8455                 tp->timer.expires = jiffies + tp->timer_offset;
8456                 add_timer(&tp->timer);
8457
8458                 netif_device_attach(dev);
8459                 tg3_netif_start(tp);
8460
8461                 spin_unlock(&tp->tx_lock);
8462                 spin_unlock_irq(&tp->lock);
8463         }
8464
8465         return err;
8466 }
8467
8468 static int tg3_resume(struct pci_dev *pdev)
8469 {
8470         struct net_device *dev = pci_get_drvdata(pdev);
8471         struct tg3 *tp = netdev_priv(dev);
8472         int err;
8473
8474         if (!netif_running(dev))
8475                 return 0;
8476
8477         pci_restore_state(tp->pdev, tp->pci_cfg_state);
8478
8479         err = tg3_set_power_state(tp, 0);
8480         if (err)
8481                 return err;
8482
8483         netif_device_attach(dev);
8484
8485         spin_lock_irq(&tp->lock);
8486         spin_lock(&tp->tx_lock);
8487
8488         tg3_init_hw(tp);
8489
8490         tp->timer.expires = jiffies + tp->timer_offset;
8491         add_timer(&tp->timer);
8492
8493         tg3_enable_ints(tp);
8494
8495         tg3_netif_start(tp);
8496
8497         spin_unlock(&tp->tx_lock);
8498         spin_unlock_irq(&tp->lock);
8499
8500         return 0;
8501 }
8502
8503 static struct pci_driver tg3_driver = {
8504         .name           = DRV_MODULE_NAME,
8505         .id_table       = tg3_pci_tbl,
8506         .probe          = tg3_init_one,
8507         .remove         = __devexit_p(tg3_remove_one),
8508         .suspend        = tg3_suspend,
8509         .resume         = tg3_resume
8510 };
8511
8512 static int __init tg3_init(void)
8513 {
8514         return pci_module_init(&tg3_driver);
8515 }
8516
8517 static void __exit tg3_cleanup(void)
8518 {
8519         pci_unregister_driver(&tg3_driver);
8520 }
8521
8522 module_init(tg3_init);
8523 module_exit(tg3_cleanup);