1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  *
8  * Firmware is:
9  *      Copyright (C) 2000-2003 Broadcom Corporation.
10  */
11
12 #include <linux/config.h>
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/if_vlan.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/workqueue.h>
33
34 #include <net/checksum.h>
35
36 #include <asm/system.h>
37 #include <asm/io.h>
38 #include <asm/byteorder.h>
39 #include <asm/uaccess.h>
40
41 #ifdef CONFIG_SPARC64
42 #include <asm/idprom.h>
43 #include <asm/oplib.h>
44 #include <asm/pbm.h>
45 #endif
46
47 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48 #define TG3_VLAN_TAG_USED 1
49 #else
50 #define TG3_VLAN_TAG_USED 0
51 #endif
52
53 #ifdef NETIF_F_TSO
54 #define TG3_TSO_SUPPORT 1
55 #else
56 #define TG3_TSO_SUPPORT 0
57 #endif
58
59 #include "tg3.h"
60
61 #define DRV_MODULE_NAME         "tg3"
62 #define PFX DRV_MODULE_NAME     ": "
63 #define DRV_MODULE_VERSION      "3.14"
64 #define DRV_MODULE_RELDATE      "November 15, 2004"
65
66 #define TG3_DEF_MAC_MODE        0
67 #define TG3_DEF_RX_MODE         0
68 #define TG3_DEF_TX_MODE         0
69 #define TG3_DEF_MSG_ENABLE        \
70         (NETIF_MSG_DRV          | \
71          NETIF_MSG_PROBE        | \
72          NETIF_MSG_LINK         | \
73          NETIF_MSG_TIMER        | \
74          NETIF_MSG_IFDOWN       | \
75          NETIF_MSG_IFUP         | \
76          NETIF_MSG_RX_ERR       | \
77          NETIF_MSG_TX_ERR)
78
79 /* length of time before we decide the hardware is borked,
80  * and dev->tx_timeout() should be called to fix the problem
81  */
82 #define TG3_TX_TIMEOUT                  (5 * HZ)
83
84 /* hardware minimum and maximum for a single frame's data payload */
85 #define TG3_MIN_MTU                     60
86 #define TG3_MAX_MTU(tp) \
87         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
88           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself;
100  * we really want to expose these constants to GCC so that modulo et
101  * al. operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
107           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
108          512 : 1024)
109
110 #define TG3_TX_RING_SIZE                512
111 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
112
113 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_RING_SIZE)
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116                                  TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118                                    TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
120                                  TG3_TX_RING_SIZE)
121 #define TX_RING_GAP(TP) \
122         (TG3_TX_RING_SIZE - (TP)->tx_pending)
123 #define TX_BUFFS_AVAIL(TP)                                              \
124         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
125           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
126           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
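/* Illustrative note (not part of the driver): TG3_TX_RING_SIZE is a power of
 * two, so NEXT_TX() wraps the ring index with a mask instead of a modulo:
 *
 *     NEXT_TX(510) == 511 & 511 == 511
 *     NEXT_TX(511) == 512 & 511 == 0      (wraps back to slot 0)
 *
 * i.e. exactly (N + 1) % TG3_TX_RING_SIZE, but without a hardware divide,
 * which is the point of the "shifts and masks" comment above.
 */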
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 static char version[] __devinitdata =
139         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
140
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_VERSION(DRV_MODULE_VERSION);
145
146 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
147 module_param(tg3_debug, int, 0);
148 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
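/* For illustration only: the debug bitmask can be supplied at module load
 * time, e.g. "modprobe tg3 tg3_debug=0x7fff" to enable every NETIF_MSG_*
 * class, or left at -1 to fall back to TG3_DEF_MSG_ENABLE above.
 */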
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
153           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { 0, }
233 };
234
235 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
236
237 static struct {
238         const char string[ETH_GSTRING_LEN];
239 } ethtool_stats_keys[TG3_NUM_STATS] = {
240         { "rx_octets" },
241         { "rx_fragments" },
242         { "rx_ucast_packets" },
243         { "rx_mcast_packets" },
244         { "rx_bcast_packets" },
245         { "rx_fcs_errors" },
246         { "rx_align_errors" },
247         { "rx_xon_pause_rcvd" },
248         { "rx_xoff_pause_rcvd" },
249         { "rx_mac_ctrl_rcvd" },
250         { "rx_xoff_entered" },
251         { "rx_frame_too_long_errors" },
252         { "rx_jabbers" },
253         { "rx_undersize_packets" },
254         { "rx_in_length_errors" },
255         { "rx_out_length_errors" },
256         { "rx_64_or_less_octet_packets" },
257         { "rx_65_to_127_octet_packets" },
258         { "rx_128_to_255_octet_packets" },
259         { "rx_256_to_511_octet_packets" },
260         { "rx_512_to_1023_octet_packets" },
261         { "rx_1024_to_1522_octet_packets" },
262         { "rx_1523_to_2047_octet_packets" },
263         { "rx_2048_to_4095_octet_packets" },
264         { "rx_4096_to_8191_octet_packets" },
265         { "rx_8192_to_9022_octet_packets" },
266
267         { "tx_octets" },
268         { "tx_collisions" },
269
270         { "tx_xon_sent" },
271         { "tx_xoff_sent" },
272         { "tx_flow_control" },
273         { "tx_mac_errors" },
274         { "tx_single_collisions" },
275         { "tx_mult_collisions" },
276         { "tx_deferred" },
277         { "tx_excessive_collisions" },
278         { "tx_late_collisions" },
279         { "tx_collide_2times" },
280         { "tx_collide_3times" },
281         { "tx_collide_4times" },
282         { "tx_collide_5times" },
283         { "tx_collide_6times" },
284         { "tx_collide_7times" },
285         { "tx_collide_8times" },
286         { "tx_collide_9times" },
287         { "tx_collide_10times" },
288         { "tx_collide_11times" },
289         { "tx_collide_12times" },
290         { "tx_collide_13times" },
291         { "tx_collide_14times" },
292         { "tx_collide_15times" },
293         { "tx_ucast_packets" },
294         { "tx_mcast_packets" },
295         { "tx_bcast_packets" },
296         { "tx_carrier_sense_errors" },
297         { "tx_discards" },
298         { "tx_errors" },
299
300         { "dma_writeq_full" },
301         { "dma_write_prioq_full" },
302         { "rxbds_empty" },
303         { "rx_discards" },
304         { "rx_errors" },
305         { "rx_threshold_hit" },
306
307         { "dma_readq_full" },
308         { "dma_read_prioq_full" },
309         { "tx_comp_queue_full" },
310
311         { "ring_set_send_prod_index" },
312         { "ring_status_update" },
313         { "nic_irqs" },
314         { "nic_avoided_irqs" },
315         { "nic_tx_threshold_hit" }
316 };
317
318 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
319 {
320         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
321                 unsigned long flags;
322
323                 spin_lock_irqsave(&tp->indirect_lock, flags);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
325                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
326                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
327         } else {
328                 writel(val, tp->regs + off);
329                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
330                         readl(tp->regs + off);
331         }
332 }
333
334 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
335 {
336         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
337                 unsigned long flags;
338
339                 spin_lock_irqsave(&tp->indirect_lock, flags);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
341                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
342                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
343         } else {
344                 void __iomem *dest = tp->regs + off;
345                 writel(val, dest);
346                 readl(dest);    /* always flush PCI write */
347         }
348 }
349
350 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
351 {
352         void __iomem *mbox = tp->regs + off;
353         writel(val, mbox);
354         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
355                 readl(mbox);
356 }
357
358 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
359 {
360         void __iomem *mbox = tp->regs + off;
361         writel(val, mbox);
362         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
363                 writel(val, mbox);
364         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
365                 readl(mbox);
366 }
367
368 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
369 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
370 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
371
372 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
373 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
374 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
375 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
376 #define tr32(reg)               readl(tp->regs + (reg))
377 #define tr16(reg)               readw(tp->regs + (reg))
378 #define tr8(reg)                readb(tp->regs + (reg))
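/* Illustration (not part of the driver): a typical flushed register write
 * followed by a settling delay, as used throughout this file:
 *
 *     tw32_f(MAC_MODE, mac_mode);
 *     udelay(40);
 *
 * tw32_f() reads the register back so the posted PCI write completes before
 * the delay starts; plain tw32() only reads back when a chip bug workaround
 * requires it.
 */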
379
380 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
381 {
382         unsigned long flags;
383
384         spin_lock_irqsave(&tp->indirect_lock, flags);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
386         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
387
388         /* Always leave this as zero. */
389         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
390         spin_unlock_irqrestore(&tp->indirect_lock, flags);
391 }
392
393 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
394 {
395         unsigned long flags;
396
397         spin_lock_irqsave(&tp->indirect_lock, flags);
398         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
399         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
400
401         /* Always leave this as zero. */
402         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
403         spin_unlock_irqrestore(&tp->indirect_lock, flags);
404 }
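/* Illustrative sketch only: tg3_write_mem()/tg3_read_mem() tunnel accesses
 * to NIC on-board SRAM through the PCI config-space memory window, e.g.:
 *
 *     u32 val;
 *     tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 *     tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *
 * The window base is restored to zero after each access so normal register
 * decoding is unaffected.
 */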
405
406 static void tg3_disable_ints(struct tg3 *tp)
407 {
408         tw32(TG3PCI_MISC_HOST_CTRL,
409              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
410         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
411         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
412 }
413
414 static inline void tg3_cond_int(struct tg3 *tp)
415 {
416         if (tp->hw_status->status & SD_STATUS_UPDATED)
417                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
418 }
419
420 static void tg3_enable_ints(struct tg3 *tp)
421 {
422         tw32(TG3PCI_MISC_HOST_CTRL,
423              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
424         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
425         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
426
427         tg3_cond_int(tp);
428 }
429
430 /* tg3_restart_ints
431  *  Similar to tg3_enable_ints(), but it can return without flushing
432  *  the PIO write that re-enables interrupts.
433  */
434 static void tg3_restart_ints(struct tg3 *tp)
435 {
436         tw32(TG3PCI_MISC_HOST_CTRL,
437                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
438         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
439         mmiowb();
440
441         tg3_cond_int(tp);
442 }
443
444 static inline void tg3_netif_stop(struct tg3 *tp)
445 {
446         netif_poll_disable(tp->dev);
447         netif_tx_disable(tp->dev);
448 }
449
450 static inline void tg3_netif_start(struct tg3 *tp)
451 {
452         netif_wake_queue(tp->dev);
453         /* NOTE: unconditional netif_wake_queue is only appropriate
454          * so long as all callers are assured to have free tx slots
455          * (such as after tg3_init_hw)
456          */
457         netif_poll_enable(tp->dev);
458         tg3_cond_int(tp);
459 }
460
461 static void tg3_switch_clocks(struct tg3 *tp)
462 {
463         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
464         u32 orig_clock_ctrl;
465
466         orig_clock_ctrl = clock_ctrl;
467         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
468                        CLOCK_CTRL_CLKRUN_OENABLE |
469                        0x1f);
470         tp->pci_clock_ctrl = clock_ctrl;
471
472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
474                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
475                         tw32_f(TG3PCI_CLOCK_CTRL,
476                                clock_ctrl | CLOCK_CTRL_625_CORE);
477                         udelay(40);
478                 }
479         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
480                 tw32_f(TG3PCI_CLOCK_CTRL,
481                      clock_ctrl |
482                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
483                 udelay(40);
484                 tw32_f(TG3PCI_CLOCK_CTRL,
485                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
486                 udelay(40);
487         }
488         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
489         udelay(40);
490 }
491
492 #define PHY_BUSY_LOOPS  5000
493
494 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
495 {
496         u32 frame_val;
497         int loops, ret;
498
499         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
500                 tw32_f(MAC_MI_MODE,
501                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
502                 udelay(80);
503         }
504
505         *val = 0xffffffff;
506
507         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
508                       MI_COM_PHY_ADDR_MASK);
509         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
510                       MI_COM_REG_ADDR_MASK);
511         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
512         
513         tw32_f(MAC_MI_COM, frame_val);
514
515         loops = PHY_BUSY_LOOPS;
516         while (loops-- > 0) {
517                 udelay(10);
518                 frame_val = tr32(MAC_MI_COM);
519
520                 if ((frame_val & MI_COM_BUSY) == 0) {
521                         udelay(5);
522                         frame_val = tr32(MAC_MI_COM);
523                         break;
524                 }
525         }
526
527         ret = -EBUSY;
528         if (loops > 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
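/* Usage illustration (not part of the driver): the BMSR latches link-down
 * events, so callers in this file read it twice to obtain the current state:
 *
 *     u32 bmsr;
 *     tg3_readphy(tp, MII_BMSR, &bmsr);
 *     tg3_readphy(tp, MII_BMSR, &bmsr);
 *
 * after which (bmsr & BMSR_LSTATUS) reflects the present link status.
 */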
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         int loops, ret;
545
546         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
547                 tw32_f(MAC_MI_MODE,
548                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
549                 udelay(80);
550         }
551
552         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
553                       MI_COM_PHY_ADDR_MASK);
554         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
555                       MI_COM_REG_ADDR_MASK);
556         frame_val |= (val & MI_COM_DATA_MASK);
557         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
558         
559         tw32_f(MAC_MI_COM, frame_val);
560
561         loops = PHY_BUSY_LOOPS;
562         while (loops-- > 0) {
563                 udelay(10);
564                 frame_val = tr32(MAC_MI_COM);
565                 if ((frame_val & MI_COM_BUSY) == 0) {
566                         udelay(5);
567                         frame_val = tr32(MAC_MI_COM);
568                         break;
569                 }
570         }
571
572         ret = -EBUSY;
573         if (loops > 0)
574                 ret = 0;
575
576         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
577                 tw32_f(MAC_MI_MODE, tp->mi_mode);
578                 udelay(80);
579         }
580
581         return ret;
582 }
583
584 static void tg3_phy_set_wirespeed(struct tg3 *tp)
585 {
586         u32 val;
587
588         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
589                 return;
590
591         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
592         tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
593         tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
594 }
595
596 static int tg3_bmcr_reset(struct tg3 *tp)
597 {
598         u32 phy_control;
599         int limit, err;
600
601         /* OK, reset it, and poll the BMCR_RESET bit until it
602          * clears or we time out.
603          */
604         phy_control = BMCR_RESET;
605         err = tg3_writephy(tp, MII_BMCR, phy_control);
606         if (err != 0)
607                 return -EBUSY;
608
609         limit = 5000;
610         while (limit--) {
611                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
612                 if (err != 0)
613                         return -EBUSY;
614
615                 if ((phy_control & BMCR_RESET) == 0) {
616                         udelay(40);
617                         break;
618                 }
619                 udelay(10);
620         }
621         if (limit <= 0)
622                 return -EBUSY;
623
624         return 0;
625 }
626
627 static int tg3_wait_macro_done(struct tg3 *tp)
628 {
629         int limit = 100;
630
631         while (limit--) {
632                 u32 tmp32;
633
634                 tg3_readphy(tp, 0x16, &tmp32);
635                 if ((tmp32 & 0x1000) == 0)
636                         break;
637         }
638         if (limit <= 0)
639                 return -EBUSY;
640
641         return 0;
642 }
643
644 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
645 {
646         static const u32 test_pat[4][6] = {
647         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
648         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
649         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
650         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
651         };
652         int chan;
653
654         for (chan = 0; chan < 4; chan++) {
655                 int i;
656
657                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
658                              (chan * 0x2000) | 0x0200);
659                 tg3_writephy(tp, 0x16, 0x0002);
660
661                 for (i = 0; i < 6; i++)
662                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
663                                      test_pat[chan][i]);
664
665                 tg3_writephy(tp, 0x16, 0x0202);
666                 if (tg3_wait_macro_done(tp)) {
667                         *resetp = 1;
668                         return -EBUSY;
669                 }
670
671                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
672                              (chan * 0x2000) | 0x0200);
673                 tg3_writephy(tp, 0x16, 0x0082);
674                 if (tg3_wait_macro_done(tp)) {
675                         *resetp = 1;
676                         return -EBUSY;
677                 }
678
679                 tg3_writephy(tp, 0x16, 0x0802);
680                 if (tg3_wait_macro_done(tp)) {
681                         *resetp = 1;
682                         return -EBUSY;
683                 }
684
685                 for (i = 0; i < 6; i += 2) {
686                         u32 low, high;
687
688                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
689                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
690                         if (tg3_wait_macro_done(tp)) {
691                                 *resetp = 1;
692                                 return -EBUSY;
693                         }
694                         low &= 0x7fff;
695                         high &= 0x000f;
696                         if (low != test_pat[chan][i] ||
697                             high != test_pat[chan][i+1]) {
698                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
699                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
700                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
701
702                                 return -EBUSY;
703                         }
704                 }
705         }
706
707         return 0;
708 }
709
710 static int tg3_phy_reset_chanpat(struct tg3 *tp)
711 {
712         int chan;
713
714         for (chan = 0; chan < 4; chan++) {
715                 int i;
716
717                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
718                              (chan * 0x2000) | 0x0200);
719                 tg3_writephy(tp, 0x16, 0x0002);
720                 for (i = 0; i < 6; i++)
721                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
722                 tg3_writephy(tp, 0x16, 0x0202);
723                 if (tg3_wait_macro_done(tp))
724                         return -EBUSY;
725         }
726
727         return 0;
728 }
729
730 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
731 {
732         u32 reg32, phy9_orig;
733         int retries, do_phy_reset, err;
734
735         retries = 10;
736         do_phy_reset = 1;
737         do {
738                 if (do_phy_reset) {
739                         err = tg3_bmcr_reset(tp);
740                         if (err)
741                                 return err;
742                         do_phy_reset = 0;
743                 }
744
745                 /* Disable transmitter and interrupt.  */
746                 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
747                 reg32 |= 0x3000;
748                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
749
750                 /* Set full-duplex, 1000 mbps.  */
751                 tg3_writephy(tp, MII_BMCR,
752                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
753
754                 /* Set to master mode.  */
755                 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
756                 tg3_writephy(tp, MII_TG3_CTRL,
757                              (MII_TG3_CTRL_AS_MASTER |
758                               MII_TG3_CTRL_ENABLE_AS_MASTER));
759
760                 /* Enable SM_DSP_CLOCK and 6dB.  */
761                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
762
763                 /* Block the PHY control access.  */
764                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
765                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
766
767                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
768                 if (!err)
769                         break;
770         } while (--retries);
771
772         err = tg3_phy_reset_chanpat(tp);
773         if (err)
774                 return err;
775
776         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
777         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
778
779         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
780         tg3_writephy(tp, 0x16, 0x0000);
781
782         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
783             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
784                 /* Set Extended packet length bit for jumbo frames */
785                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
786         }
787         else {
788                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
789         }
790
791         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
792
793         tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
794         reg32 &= ~0x3000;
795         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
796
797         return err;
798 }
799
800 /* This will reset the tigon3 PHY and then apply any chip-specific
801  * DSP workarounds that must follow a reset.
802  */
803 static int tg3_phy_reset(struct tg3 *tp)
804 {
805         u32 phy_status;
806         int err;
807
808         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
809         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
810         if (err != 0)
811                 return -EBUSY;
812
813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
815             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
816                 err = tg3_phy_reset_5703_4_5(tp);
817                 if (err)
818                         return err;
819                 goto out;
820         }
821
822         err = tg3_bmcr_reset(tp);
823         if (err)
824                 return err;
825
826 out:
827         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
828                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
829                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
830                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
832                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
833                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
834         }
835         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
836                 tg3_writephy(tp, 0x1c, 0x8d68);
837                 tg3_writephy(tp, 0x1c, 0x8d68);
838         }
839         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
840                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
843                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
844                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
845                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
846                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
847                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
848         }
849         /* Set Extended packet length bit (bit 14) on all chips
850          * that support jumbo frames. */
851         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
852                 /* Cannot do read-modify-write on 5401 */
853                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
854         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
855                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
856                 u32 phy_reg;
857
858                 /* Set bit 14 with read-modify-write to preserve other bits */
859                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
860                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
861                 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
862         }
863         tg3_phy_set_wirespeed(tp);
864         return 0;
865 }
866
867 static void tg3_frob_aux_power(struct tg3 *tp)
868 {
869         struct tg3 *tp_peer = tp;
870
871         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
872                 return;
873
874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
875                 tp_peer = pci_get_drvdata(tp->pdev_peer);
876                 if (!tp_peer)
877                         BUG();
878         }
879
880
881         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
882             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
883                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
884                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
885                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
886                              (GRC_LCLCTRL_GPIO_OE0 |
887                               GRC_LCLCTRL_GPIO_OE1 |
888                               GRC_LCLCTRL_GPIO_OE2 |
889                               GRC_LCLCTRL_GPIO_OUTPUT0 |
890                               GRC_LCLCTRL_GPIO_OUTPUT1));
891                         udelay(100);
892                 } else {
893                         int no_gpio2;
894                         u32 grc_local_ctrl;
895
896                         if (tp_peer != tp &&
897                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
898                                 return;
899
900                         /* On 5753 and variants, GPIO2 cannot be used. */
901                         no_gpio2 = (tp->nic_sram_data_cfg &
902                                     NIC_SRAM_DATA_CFG_NO_GPIO2) != 0;
903
904                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
905                                          GRC_LCLCTRL_GPIO_OE1 |
906                                          GRC_LCLCTRL_GPIO_OE2 |
907                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
908                                          GRC_LCLCTRL_GPIO_OUTPUT2;
909                         if (no_gpio2) {
910                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
911                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
912                         }
913                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
914                                grc_local_ctrl);
915                         udelay(100);
916
917                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
918                                          GRC_LCLCTRL_GPIO_OE1 |
919                                          GRC_LCLCTRL_GPIO_OE2 |
920                                          GRC_LCLCTRL_GPIO_OUTPUT0 |
921                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
922                                          GRC_LCLCTRL_GPIO_OUTPUT2;
923                         if (no_gpio2) {
924                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
925                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
926                         }
927                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
928                                grc_local_ctrl);
929                         udelay(100);
930
931                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
932                                          GRC_LCLCTRL_GPIO_OE1 |
933                                          GRC_LCLCTRL_GPIO_OE2 |
934                                          GRC_LCLCTRL_GPIO_OUTPUT0 |
935                                          GRC_LCLCTRL_GPIO_OUTPUT1;
936                         if (!no_gpio2) {
937                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
938                                        grc_local_ctrl);
939                                 udelay(100);
940                         }
941                 }
942         } else {
943                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
944                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
945                         if (tp_peer != tp &&
946                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
947                                 return;
948
949                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
950                              (GRC_LCLCTRL_GPIO_OE1 |
951                               GRC_LCLCTRL_GPIO_OUTPUT1));
952                         udelay(100);
953
954                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
955                              (GRC_LCLCTRL_GPIO_OE1));
956                         udelay(100);
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962                 }
963         }
964 }
965
966 static int tg3_setup_phy(struct tg3 *, int);
967
968 #define RESET_KIND_SHUTDOWN     0
969 #define RESET_KIND_INIT         1
970 #define RESET_KIND_SUSPEND      2
971
972 static void tg3_write_sig_post_reset(struct tg3 *, int);
973
974 static int tg3_set_power_state(struct tg3 *tp, int state)
975 {
976         u32 misc_host_ctrl;
977         u16 power_control, power_caps;
978         int pm = tp->pm_cap;
979
980         /* Make sure register accesses (indirect or otherwise)
981          * will function correctly.
982          */
983         pci_write_config_dword(tp->pdev,
984                                TG3PCI_MISC_HOST_CTRL,
985                                tp->misc_host_ctrl);
986
987         pci_read_config_word(tp->pdev,
988                              pm + PCI_PM_CTRL,
989                              &power_control);
990         power_control |= PCI_PM_CTRL_PME_STATUS;
991         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
992         switch (state) {
993         case 0:
994                 power_control |= 0;
995                 pci_write_config_word(tp->pdev,
996                                       pm + PCI_PM_CTRL,
997                                       power_control);
998                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
999                 udelay(100);
1000
1001                 return 0;
1002
1003         case 1:
1004                 power_control |= 1;
1005                 break;
1006
1007         case 2:
1008                 power_control |= 2;
1009                 break;
1010
1011         case 3:
1012                 power_control |= 3;
1013                 break;
1014
1015         default:
1016                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1017                        "requested.\n",
1018                        tp->dev->name, state);
1019                 return -EINVAL;
1020         };
1021
1022         power_control |= PCI_PM_CTRL_PME_ENABLE;
1023
1024         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1025         tw32(TG3PCI_MISC_HOST_CTRL,
1026              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1027
1028         if (tp->link_config.phy_is_low_power == 0) {
1029                 tp->link_config.phy_is_low_power = 1;
1030                 tp->link_config.orig_speed = tp->link_config.speed;
1031                 tp->link_config.orig_duplex = tp->link_config.duplex;
1032                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1033         }
1034
1035         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1036                 tp->link_config.speed = SPEED_10;
1037                 tp->link_config.duplex = DUPLEX_HALF;
1038                 tp->link_config.autoneg = AUTONEG_ENABLE;
1039                 tg3_setup_phy(tp, 0);
1040         }
1041
1042         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1043
1044         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1045                 u32 mac_mode;
1046
1047                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1048                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1049                         udelay(40);
1050
1051                         mac_mode = MAC_MODE_PORT_MODE_MII;
1052
1053                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1054                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1055                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1056                 } else {
1057                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1058                 }
1059
1060                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1061                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1062
1063                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1064                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1065                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1066
1067                 tw32_f(MAC_MODE, mac_mode);
1068                 udelay(100);
1069
1070                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1071                 udelay(10);
1072         }
1073
1074         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1075             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1076              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1077                 u32 base_val;
1078
1079                 base_val = tp->pci_clock_ctrl;
1080                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1081                              CLOCK_CTRL_TXCLK_DISABLE);
1082
1083                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1084                      CLOCK_CTRL_ALTCLK |
1085                      CLOCK_CTRL_PWRDOWN_PLL133);
1086                 udelay(40);
1087         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1088                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1089                 u32 newbits1, newbits2;
1090
1091                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1092                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1093                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1094                                     CLOCK_CTRL_TXCLK_DISABLE |
1095                                     CLOCK_CTRL_ALTCLK);
1096                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1097                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1098                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1099                         newbits1 = CLOCK_CTRL_625_CORE;
1100                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1101                 } else {
1102                         newbits1 = CLOCK_CTRL_ALTCLK;
1103                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1104                 }
1105
1106                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1107                 udelay(40);
1108
1109                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1110                 udelay(40);
1111
1112                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1113                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1114                         u32 newbits3;
1115
1116                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1117                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1118                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1119                                             CLOCK_CTRL_TXCLK_DISABLE |
1120                                             CLOCK_CTRL_44MHZ_CORE);
1121                         } else {
1122                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1123                         }
1124
1125                         tw32_f(TG3PCI_CLOCK_CTRL,
1126                                          tp->pci_clock_ctrl | newbits3);
1127                         udelay(40);
1128                 }
1129         }
1130
1131         tg3_frob_aux_power(tp);
1132
1133         /* Finally, set the new power state. */
1134         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1135
1136         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1137
1138         return 0;
1139 }
1140
1141 static void tg3_link_report(struct tg3 *tp)
1142 {
1143         if (!netif_carrier_ok(tp->dev)) {
1144                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1145         } else {
1146                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1147                        tp->dev->name,
1148                        (tp->link_config.active_speed == SPEED_1000 ?
1149                         1000 :
1150                         (tp->link_config.active_speed == SPEED_100 ?
1151                          100 : 10)),
1152                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1153                         "full" : "half"));
1154
1155                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1156                        "%s for RX.\n",
1157                        tp->dev->name,
1158                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1159                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1160         }
1161 }
1162
1163 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1164 {
1165         u32 new_tg3_flags = 0;
1166         u32 old_rx_mode = tp->rx_mode;
1167         u32 old_tx_mode = tp->tx_mode;
1168
1169         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1170                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1171                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1172                                 if (remote_adv & LPA_PAUSE_CAP)
1173                                         new_tg3_flags |=
1174                                                 (TG3_FLAG_RX_PAUSE |
1175                                                 TG3_FLAG_TX_PAUSE);
1176                                 else if (remote_adv & LPA_PAUSE_ASYM)
1177                                         new_tg3_flags |=
1178                                                 (TG3_FLAG_RX_PAUSE);
1179                         } else {
1180                                 if (remote_adv & LPA_PAUSE_CAP)
1181                                         new_tg3_flags |=
1182                                                 (TG3_FLAG_RX_PAUSE |
1183                                                 TG3_FLAG_TX_PAUSE);
1184                         }
1185                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1186                         if ((remote_adv & LPA_PAUSE_CAP) &&
1187                         (remote_adv & LPA_PAUSE_ASYM))
1188                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1189                 }
1190
1191                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1192                 tp->tg3_flags |= new_tg3_flags;
1193         } else {
1194                 new_tg3_flags = tp->tg3_flags;
1195         }
1196
1197         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1198                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1199         else
1200                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1201
1202         if (old_rx_mode != tp->rx_mode) {
1203                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1204         }
1205         
1206         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1207                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1208         else
1209                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1210
1211         if (old_tx_mode != tp->tx_mode) {
1212                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1213         }
1214 }
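/* Illustrative summary (not part of the driver) of the 802.3x pause
 * resolution implemented above when TG3_FLAG_PAUSE_AUTONEG is set:
 *
 *     local CAP  local ASYM  remote CAP  remote ASYM  =>  result
 *         1          0           1           x            RX + TX pause
 *         1          1           1           x            RX + TX pause
 *         1          1           0           1            RX pause only
 *         0          1           1           1            TX pause only
 *
 * Any other combination leaves both directions of flow control disabled.
 */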
1215
1216 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1217 {
1218         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1219         case MII_TG3_AUX_STAT_10HALF:
1220                 *speed = SPEED_10;
1221                 *duplex = DUPLEX_HALF;
1222                 break;
1223
1224         case MII_TG3_AUX_STAT_10FULL:
1225                 *speed = SPEED_10;
1226                 *duplex = DUPLEX_FULL;
1227                 break;
1228
1229         case MII_TG3_AUX_STAT_100HALF:
1230                 *speed = SPEED_100;
1231                 *duplex = DUPLEX_HALF;
1232                 break;
1233
1234         case MII_TG3_AUX_STAT_100FULL:
1235                 *speed = SPEED_100;
1236                 *duplex = DUPLEX_FULL;
1237                 break;
1238
1239         case MII_TG3_AUX_STAT_1000HALF:
1240                 *speed = SPEED_1000;
1241                 *duplex = DUPLEX_HALF;
1242                 break;
1243
1244         case MII_TG3_AUX_STAT_1000FULL:
1245                 *speed = SPEED_1000;
1246                 *duplex = DUPLEX_FULL;
1247                 break;
1248
1249         default:
1250                 *speed = SPEED_INVALID;
1251                 *duplex = DUPLEX_INVALID;
1252                 break;
1253         };
1254 }
1255
1256 static int tg3_phy_copper_begin(struct tg3 *tp)
1257 {
1258         u32 new_adv;
1259         int i;
1260
1261         if (tp->link_config.phy_is_low_power) {
1262                 /* Entering low power mode.  Disable gigabit and
1263                  * 100baseT advertisements.
1264                  */
1265                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1266
1267                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1268                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1269                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1270                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1271
1272                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1273         } else if (tp->link_config.speed == SPEED_INVALID) {
1274                 tp->link_config.advertising =
1275                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1276                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1277                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1278                          ADVERTISED_Autoneg | ADVERTISED_MII);
1279
1280                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1281                         tp->link_config.advertising &=
1282                                 ~(ADVERTISED_1000baseT_Half |
1283                                   ADVERTISED_1000baseT_Full);
1284
1285                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1286                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1287                         new_adv |= ADVERTISE_10HALF;
1288                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1289                         new_adv |= ADVERTISE_10FULL;
1290                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1291                         new_adv |= ADVERTISE_100HALF;
1292                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1293                         new_adv |= ADVERTISE_100FULL;
1294                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1295
1296                 if (tp->link_config.advertising &
1297                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1298                         new_adv = 0;
1299                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1300                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1301                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1302                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1303                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1304                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1305                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1306                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1307                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1308                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1309                 } else {
1310                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1311                 }
1312         } else {
1313                 /* Asking for a specific link mode. */
1314                 if (tp->link_config.speed == SPEED_1000) {
1315                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1316                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1317
1318                         if (tp->link_config.duplex == DUPLEX_FULL)
1319                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1320                         else
1321                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1322                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1323                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1324                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1325                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1326                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1327                 } else {
1328                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1329
1330                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1331                         if (tp->link_config.speed == SPEED_100) {
1332                                 if (tp->link_config.duplex == DUPLEX_FULL)
1333                                         new_adv |= ADVERTISE_100FULL;
1334                                 else
1335                                         new_adv |= ADVERTISE_100HALF;
1336                         } else {
1337                                 if (tp->link_config.duplex == DUPLEX_FULL)
1338                                         new_adv |= ADVERTISE_10FULL;
1339                                 else
1340                                         new_adv |= ADVERTISE_10HALF;
1341                         }
1342                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1343                 }
1344         }
1345
1346         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1347             tp->link_config.speed != SPEED_INVALID) {
1348                 u32 bmcr, orig_bmcr;
1349
1350                 tp->link_config.active_speed = tp->link_config.speed;
1351                 tp->link_config.active_duplex = tp->link_config.duplex;
1352
1353                 bmcr = 0;
1354                 switch (tp->link_config.speed) {
1355                 default:
1356                 case SPEED_10:
1357                         break;
1358
1359                 case SPEED_100:
1360                         bmcr |= BMCR_SPEED100;
1361                         break;
1362
1363                 case SPEED_1000:
1364                         bmcr |= TG3_BMCR_SPEED1000;
1365                         break;
1366                 };
1367
1368                 if (tp->link_config.duplex == DUPLEX_FULL)
1369                         bmcr |= BMCR_FULLDPLX;
1370
1371                 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1372                 if (bmcr != orig_bmcr) {
1373                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1374                         for (i = 0; i < 1500; i++) {
1375                                 u32 tmp;
1376
1377                                 udelay(10);
1378                                 tg3_readphy(tp, MII_BMSR, &tmp);
1379                                 tg3_readphy(tp, MII_BMSR, &tmp);
1380                                 if (!(tmp & BMSR_LSTATUS)) {
1381                                         udelay(40);
1382                                         break;
1383                                 }
1384                         }
1385                         tg3_writephy(tp, MII_BMCR, bmcr);
1386                         udelay(40);
1387                 }
1388         } else {
1389                 tg3_writephy(tp, MII_BMCR,
1390                              BMCR_ANENABLE | BMCR_ANRESTART);
1391         }
1392
1393         return 0;
1394 }
1395
1396 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1397 {
1398         int err;
1399
1400         /* Turn off tap power management. */
1401         /* Set Extended packet length bit */
1402         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1403
1404         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1405         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1406
1407         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1408         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1409
1410         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1411         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1412
1413         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1414         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1415
1416         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1417         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1418
1419         udelay(40);
1420
1421         return err;
1422 }
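
/* Editor's note: the DSP accesses above always come in pairs: select a
 * register through MII_TG3_DSP_ADDRESS, then push the value through
 * MII_TG3_DSP_RW_PORT.  The helper below is only an illustrative sketch of
 * that pattern (hypothetical name, not part of the original driver); it
 * simply folds the two MII writes into one error code.
 */
static int tg3_phydsp_write_sketch(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        /* Select the DSP register, then write through the read/write port. */
        err  = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}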
1423
1424 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1425 {
1426         u32 adv_reg, all_mask;
1427
1428         tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1429         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1430                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1431         if ((adv_reg & all_mask) != all_mask)
1432                 return 0;
1433         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1434                 u32 tg3_ctrl;
1435
1436                 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1437                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1438                             MII_TG3_CTRL_ADV_1000_FULL);
1439                 if ((tg3_ctrl & all_mask) != all_mask)
1440                         return 0;
1441         }
1442         return 1;
1443 }
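
/* Editor's note: the 10/100 mask assembled above is the same set of bits
 * that <linux/mii.h> groups under ADVERTISE_ALL.  The sketch below
 * (hypothetical name, not part of the original driver, and assuming that
 * macro is available in this tree) shows the first check rewritten in
 * those terms.
 */
static int tg3_copper_advertising_10_100_sketch(struct tg3 *tp)
{
        u32 adv_reg;

        tg3_readphy(tp, MII_ADVERTISE, &adv_reg);

        /* Nonzero only if every 10/100 half/full mode is being advertised. */
        return (adv_reg & ADVERTISE_ALL) == ADVERTISE_ALL;
}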
1444
1445 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1446 {
1447         int current_link_up;
1448         u32 bmsr, dummy;
1449         u16 current_speed;
1450         u8 current_duplex;
1451         int i, err;
1452
1453         tw32(MAC_EVENT, 0);
1454
1455         tw32_f(MAC_STATUS,
1456              (MAC_STATUS_SYNC_CHANGED |
1457               MAC_STATUS_CFG_CHANGED |
1458               MAC_STATUS_MI_COMPLETION |
1459               MAC_STATUS_LNKSTATE_CHANGED));
1460         udelay(40);
1461
1462         tp->mi_mode = MAC_MI_MODE_BASE;
1463         tw32_f(MAC_MI_MODE, tp->mi_mode);
1464         udelay(80);
1465
1466         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1467
1468         /* Some third-party PHYs need to be reset on link going
1469          * down.
1470          */
1471         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1472              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1473              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1474             netif_carrier_ok(tp->dev)) {
1475                 tg3_readphy(tp, MII_BMSR, &bmsr);
1476                 tg3_readphy(tp, MII_BMSR, &bmsr);
1477                 if (!(bmsr & BMSR_LSTATUS))
1478                         force_reset = 1;
1479         }
1480         if (force_reset)
1481                 tg3_phy_reset(tp);
1482
1483         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1484                 tg3_readphy(tp, MII_BMSR, &bmsr);
1485                 tg3_readphy(tp, MII_BMSR, &bmsr);
1486
1487                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1488                         bmsr = 0;
1489
1490                 if (!(bmsr & BMSR_LSTATUS)) {
1491                         err = tg3_init_5401phy_dsp(tp);
1492                         if (err)
1493                                 return err;
1494
1495                         tg3_readphy(tp, MII_BMSR, &bmsr);
1496                         for (i = 0; i < 1000; i++) {
1497                                 udelay(10);
1498                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1499                                 if (bmsr & BMSR_LSTATUS) {
1500                                         udelay(40);
1501                                         break;
1502                                 }
1503                         }
1504
1505                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1506                             !(bmsr & BMSR_LSTATUS) &&
1507                             tp->link_config.active_speed == SPEED_1000) {
1508                                 err = tg3_phy_reset(tp);
1509                                 if (!err)
1510                                         err = tg3_init_5401phy_dsp(tp);
1511                                 if (err)
1512                                         return err;
1513                         }
1514                 }
1515         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1516                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1517                 /* 5701 {A0,B0} CRC bug workaround */
1518                 tg3_writephy(tp, 0x15, 0x0a75);
1519                 tg3_writephy(tp, 0x1c, 0x8c68);
1520                 tg3_writephy(tp, 0x1c, 0x8d68);
1521                 tg3_writephy(tp, 0x1c, 0x8c68);
1522         }
1523
1524         /* Clear pending interrupts... */
1525         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1526         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1527
1528         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1529                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1530         else
1531                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1532
1533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1535                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1536                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1537                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1538                 else
1539                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1540         }
1541
1542         current_link_up = 0;
1543         current_speed = SPEED_INVALID;
1544         current_duplex = DUPLEX_INVALID;
1545
1546         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1547                 u32 val;
1548
1549                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1550                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1551                 if (!(val & (1 << 10))) {
1552                         val |= (1 << 10);
1553                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1554                         goto relink;
1555                 }
1556         }
1557
1558         bmsr = 0;
1559         for (i = 0; i < 100; i++) {
1560                 tg3_readphy(tp, MII_BMSR, &bmsr);
1561                 tg3_readphy(tp, MII_BMSR, &bmsr);
1562                 if (bmsr & BMSR_LSTATUS)
1563                         break;
1564                 udelay(40);
1565         }
1566
1567         if (bmsr & BMSR_LSTATUS) {
1568                 u32 aux_stat, bmcr;
1569
1570                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1571                 for (i = 0; i < 2000; i++) {
1572                         udelay(10);
1573                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1574                         if (aux_stat)
1575                                 break;
1576                 }
1577
1578                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1579                                              &current_speed,
1580                                              &current_duplex);
1581
1582                 bmcr = 0;
1583                 for (i = 0; i < 200; i++) {
1584                         tg3_readphy(tp, MII_BMCR, &bmcr);
1585                         tg3_readphy(tp, MII_BMCR, &bmcr);
1586                         if (bmcr && bmcr != 0x7fff)
1587                                 break;
1588                         udelay(10);
1589                 }
1590
1591                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1592                         if (bmcr & BMCR_ANENABLE) {
1593                                 current_link_up = 1;
1594
1595                                 /* Force autoneg restart if we are exiting
1596                                  * low power mode.
1597                                  */
1598                                 if (!tg3_copper_is_advertising_all(tp))
1599                                         current_link_up = 0;
1600                         } else {
1601                                 current_link_up = 0;
1602                         }
1603                 } else {
1604                         if (!(bmcr & BMCR_ANENABLE) &&
1605                             tp->link_config.speed == current_speed &&
1606                             tp->link_config.duplex == current_duplex) {
1607                                 current_link_up = 1;
1608                         } else {
1609                                 current_link_up = 0;
1610                         }
1611                 }
1612
1613                 tp->link_config.active_speed = current_speed;
1614                 tp->link_config.active_duplex = current_duplex;
1615         }
1616
1617         if (current_link_up == 1 &&
1618             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1619             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1620                 u32 local_adv, remote_adv;
1621
1622                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1623                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1624
1625                 tg3_readphy(tp, MII_LPA, &remote_adv);
1626                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1627
1628                 /* If we are not advertising full pause capability,
1629                  * something is wrong.  Bring the link down and reconfigure.
1630                  */
1631                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1632                         current_link_up = 0;
1633                 } else {
1634                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1635                 }
1636         }
1637 relink:
1638         if (current_link_up == 0) {
1639                 u32 tmp;
1640
1641                 tg3_phy_copper_begin(tp);
1642
1643                 tg3_readphy(tp, MII_BMSR, &tmp);
1644                 tg3_readphy(tp, MII_BMSR, &tmp);
1645                 if (tmp & BMSR_LSTATUS)
1646                         current_link_up = 1;
1647         }
1648
1649         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1650         if (current_link_up == 1) {
1651                 if (tp->link_config.active_speed == SPEED_100 ||
1652                     tp->link_config.active_speed == SPEED_10)
1653                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1654                 else
1655                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1656         } else
1657                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1658
1659         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1660         if (tp->link_config.active_duplex == DUPLEX_HALF)
1661                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1662
1663         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1665                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1666                     (current_link_up == 1 &&
1667                      tp->link_config.active_speed == SPEED_10))
1668                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1669         } else {
1670                 if (current_link_up == 1)
1671                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1672         }
1673
1674         /* ??? Without this setting Netgear GA302T PHY does not
1675          * ??? send/receive packets...
1676          */
1677         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1678             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1679                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1680                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1681                 udelay(80);
1682         }
1683
1684         tw32_f(MAC_MODE, tp->mac_mode);
1685         udelay(40);
1686
1687         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1688                 /* Polled via timer. */
1689                 tw32_f(MAC_EVENT, 0);
1690         } else {
1691                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1692         }
1693         udelay(40);
1694
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1696             current_link_up == 1 &&
1697             tp->link_config.active_speed == SPEED_1000 &&
1698             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1699              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1700                 udelay(120);
1701                 tw32_f(MAC_STATUS,
1702                      (MAC_STATUS_SYNC_CHANGED |
1703                       MAC_STATUS_CFG_CHANGED));
1704                 udelay(40);
1705                 tg3_write_mem(tp,
1706                               NIC_SRAM_FIRMWARE_MBOX,
1707                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1708         }
1709
1710         if (current_link_up != netif_carrier_ok(tp->dev)) {
1711                 if (current_link_up)
1712                         netif_carrier_on(tp->dev);
1713                 else
1714                         netif_carrier_off(tp->dev);
1715                 tg3_link_report(tp);
1716         }
1717
1718         return 0;
1719 }
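
/* Editor's note: MII_BMSR is read twice back to back throughout the routine
 * above because the link-status bit in BMSR is latched low: the first read
 * returns (and clears) any stale "link was lost" indication, and the second
 * read reports the current state.  A sketch of that idiom as a helper
 * (hypothetical name, not part of the original driver):
 */
static void tg3_read_bmsr_twice_sketch(struct tg3 *tp, u32 *bmsr)
{
        /* First read flushes the latched value, second read is live. */
        tg3_readphy(tp, MII_BMSR, bmsr);
        tg3_readphy(tp, MII_BMSR, bmsr);
}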
1720
1721 struct tg3_fiber_aneginfo {
1722         int state;
1723 #define ANEG_STATE_UNKNOWN              0
1724 #define ANEG_STATE_AN_ENABLE            1
1725 #define ANEG_STATE_RESTART_INIT         2
1726 #define ANEG_STATE_RESTART              3
1727 #define ANEG_STATE_DISABLE_LINK_OK      4
1728 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1729 #define ANEG_STATE_ABILITY_DETECT       6
1730 #define ANEG_STATE_ACK_DETECT_INIT      7
1731 #define ANEG_STATE_ACK_DETECT           8
1732 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1733 #define ANEG_STATE_COMPLETE_ACK         10
1734 #define ANEG_STATE_IDLE_DETECT_INIT     11
1735 #define ANEG_STATE_IDLE_DETECT          12
1736 #define ANEG_STATE_LINK_OK              13
1737 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1738 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1739
1740         u32 flags;
1741 #define MR_AN_ENABLE            0x00000001
1742 #define MR_RESTART_AN           0x00000002
1743 #define MR_AN_COMPLETE          0x00000004
1744 #define MR_PAGE_RX              0x00000008
1745 #define MR_NP_LOADED            0x00000010
1746 #define MR_TOGGLE_TX            0x00000020
1747 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1748 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1749 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1750 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1751 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1752 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1753 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1754 #define MR_TOGGLE_RX            0x00002000
1755 #define MR_NP_RX                0x00004000
1756
1757 #define MR_LINK_OK              0x80000000
1758
1759         unsigned long link_time, cur_time;
1760
1761         u32 ability_match_cfg;
1762         int ability_match_count;
1763
1764         char ability_match, idle_match, ack_match;
1765
1766         u32 txconfig, rxconfig;
1767 #define ANEG_CFG_NP             0x00000080
1768 #define ANEG_CFG_ACK            0x00000040
1769 #define ANEG_CFG_RF2            0x00000020
1770 #define ANEG_CFG_RF1            0x00000010
1771 #define ANEG_CFG_PS2            0x00000001
1772 #define ANEG_CFG_PS1            0x00008000
1773 #define ANEG_CFG_HD             0x00004000
1774 #define ANEG_CFG_FD             0x00002000
1775 #define ANEG_CFG_INVAL          0x00001f06
1776
1777 };
1778 #define ANEG_OK         0
1779 #define ANEG_DONE       1
1780 #define ANEG_TIMER_ENAB 2
1781 #define ANEG_FAILED     -1
1782
1783 #define ANEG_STATE_SETTLE_TIME  10000
1784
1785 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1786                                    struct tg3_fiber_aneginfo *ap)
1787 {
1788         unsigned long delta;
1789         u32 rx_cfg_reg;
1790         int ret;
1791
1792         if (ap->state == ANEG_STATE_UNKNOWN) {
1793                 ap->rxconfig = 0;
1794                 ap->link_time = 0;
1795                 ap->cur_time = 0;
1796                 ap->ability_match_cfg = 0;
1797                 ap->ability_match_count = 0;
1798                 ap->ability_match = 0;
1799                 ap->idle_match = 0;
1800                 ap->ack_match = 0;
1801         }
1802         ap->cur_time++;
1803
1804         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1805                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1806
1807                 if (rx_cfg_reg != ap->ability_match_cfg) {
1808                         ap->ability_match_cfg = rx_cfg_reg;
1809                         ap->ability_match = 0;
1810                         ap->ability_match_count = 0;
1811                 } else {
1812                         if (++ap->ability_match_count > 1) {
1813                                 ap->ability_match = 1;
1814                                 ap->ability_match_cfg = rx_cfg_reg;
1815                         }
1816                 }
1817                 if (rx_cfg_reg & ANEG_CFG_ACK)
1818                         ap->ack_match = 1;
1819                 else
1820                         ap->ack_match = 0;
1821
1822                 ap->idle_match = 0;
1823         } else {
1824                 ap->idle_match = 1;
1825                 ap->ability_match_cfg = 0;
1826                 ap->ability_match_count = 0;
1827                 ap->ability_match = 0;
1828                 ap->ack_match = 0;
1829
1830                 rx_cfg_reg = 0;
1831         }
1832
1833         ap->rxconfig = rx_cfg_reg;
1834         ret = ANEG_OK;
1835
1836         switch(ap->state) {
1837         case ANEG_STATE_UNKNOWN:
1838                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1839                         ap->state = ANEG_STATE_AN_ENABLE;
1840
1841                 /* fallthru */
1842         case ANEG_STATE_AN_ENABLE:
1843                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1844                 if (ap->flags & MR_AN_ENABLE) {
1845                         ap->link_time = 0;
1846                         ap->cur_time = 0;
1847                         ap->ability_match_cfg = 0;
1848                         ap->ability_match_count = 0;
1849                         ap->ability_match = 0;
1850                         ap->idle_match = 0;
1851                         ap->ack_match = 0;
1852
1853                         ap->state = ANEG_STATE_RESTART_INIT;
1854                 } else {
1855                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1856                 }
1857                 break;
1858
1859         case ANEG_STATE_RESTART_INIT:
1860                 ap->link_time = ap->cur_time;
1861                 ap->flags &= ~(MR_NP_LOADED);
1862                 ap->txconfig = 0;
1863                 tw32(MAC_TX_AUTO_NEG, 0);
1864                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1865                 tw32_f(MAC_MODE, tp->mac_mode);
1866                 udelay(40);
1867
1868                 ret = ANEG_TIMER_ENAB;
1869                 ap->state = ANEG_STATE_RESTART;
1870
1871                 /* fallthru */
1872         case ANEG_STATE_RESTART:
1873                 delta = ap->cur_time - ap->link_time;
1874                 if (delta > ANEG_STATE_SETTLE_TIME) {
1875                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1876                 } else {
1877                         ret = ANEG_TIMER_ENAB;
1878                 }
1879                 break;
1880
1881         case ANEG_STATE_DISABLE_LINK_OK:
1882                 ret = ANEG_DONE;
1883                 break;
1884
1885         case ANEG_STATE_ABILITY_DETECT_INIT:
1886                 ap->flags &= ~(MR_TOGGLE_TX);
1887                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1888                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1889                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1890                 tw32_f(MAC_MODE, tp->mac_mode);
1891                 udelay(40);
1892
1893                 ap->state = ANEG_STATE_ABILITY_DETECT;
1894                 break;
1895
1896         case ANEG_STATE_ABILITY_DETECT:
1897                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1898                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1899                 }
1900                 break;
1901
1902         case ANEG_STATE_ACK_DETECT_INIT:
1903                 ap->txconfig |= ANEG_CFG_ACK;
1904                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1905                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1906                 tw32_f(MAC_MODE, tp->mac_mode);
1907                 udelay(40);
1908
1909                 ap->state = ANEG_STATE_ACK_DETECT;
1910
1911                 /* fallthru */
1912         case ANEG_STATE_ACK_DETECT:
1913                 if (ap->ack_match != 0) {
1914                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1915                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1916                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1917                         } else {
1918                                 ap->state = ANEG_STATE_AN_ENABLE;
1919                         }
1920                 } else if (ap->ability_match != 0 &&
1921                            ap->rxconfig == 0) {
1922                         ap->state = ANEG_STATE_AN_ENABLE;
1923                 }
1924                 break;
1925
1926         case ANEG_STATE_COMPLETE_ACK_INIT:
1927                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1928                         ret = ANEG_FAILED;
1929                         break;
1930                 }
1931                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1932                                MR_LP_ADV_HALF_DUPLEX |
1933                                MR_LP_ADV_SYM_PAUSE |
1934                                MR_LP_ADV_ASYM_PAUSE |
1935                                MR_LP_ADV_REMOTE_FAULT1 |
1936                                MR_LP_ADV_REMOTE_FAULT2 |
1937                                MR_LP_ADV_NEXT_PAGE |
1938                                MR_TOGGLE_RX |
1939                                MR_NP_RX);
1940                 if (ap->rxconfig & ANEG_CFG_FD)
1941                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1942                 if (ap->rxconfig & ANEG_CFG_HD)
1943                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1944                 if (ap->rxconfig & ANEG_CFG_PS1)
1945                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1946                 if (ap->rxconfig & ANEG_CFG_PS2)
1947                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1948                 if (ap->rxconfig & ANEG_CFG_RF1)
1949                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1950                 if (ap->rxconfig & ANEG_CFG_RF2)
1951                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1952                 if (ap->rxconfig & ANEG_CFG_NP)
1953                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1954
1955                 ap->link_time = ap->cur_time;
1956
1957                 ap->flags ^= (MR_TOGGLE_TX);
1958                 if (ap->rxconfig & 0x0008)
1959                         ap->flags |= MR_TOGGLE_RX;
1960                 if (ap->rxconfig & ANEG_CFG_NP)
1961                         ap->flags |= MR_NP_RX;
1962                 ap->flags |= MR_PAGE_RX;
1963
1964                 ap->state = ANEG_STATE_COMPLETE_ACK;
1965                 ret = ANEG_TIMER_ENAB;
1966                 break;
1967
1968         case ANEG_STATE_COMPLETE_ACK:
1969                 if (ap->ability_match != 0 &&
1970                     ap->rxconfig == 0) {
1971                         ap->state = ANEG_STATE_AN_ENABLE;
1972                         break;
1973                 }
1974                 delta = ap->cur_time - ap->link_time;
1975                 if (delta > ANEG_STATE_SETTLE_TIME) {
1976                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1977                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1978                         } else {
1979                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1980                                     !(ap->flags & MR_NP_RX)) {
1981                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1982                                 } else {
1983                                         ret = ANEG_FAILED;
1984                                 }
1985                         }
1986                 }
1987                 break;
1988
1989         case ANEG_STATE_IDLE_DETECT_INIT:
1990                 ap->link_time = ap->cur_time;
1991                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1992                 tw32_f(MAC_MODE, tp->mac_mode);
1993                 udelay(40);
1994
1995                 ap->state = ANEG_STATE_IDLE_DETECT;
1996                 ret = ANEG_TIMER_ENAB;
1997                 break;
1998
1999         case ANEG_STATE_IDLE_DETECT:
2000                 if (ap->ability_match != 0 &&
2001                     ap->rxconfig == 0) {
2002                         ap->state = ANEG_STATE_AN_ENABLE;
2003                         break;
2004                 }
2005                 delta = ap->cur_time - ap->link_time;
2006                 if (delta > ANEG_STATE_SETTLE_TIME) {
2007                         /* XXX another gem from the Broadcom driver :( */
2008                         ap->state = ANEG_STATE_LINK_OK;
2009                 }
2010                 break;
2011
2012         case ANEG_STATE_LINK_OK:
2013                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2014                 ret = ANEG_DONE;
2015                 break;
2016
2017         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2018                 /* ??? unimplemented */
2019                 break;
2020
2021         case ANEG_STATE_NEXT_PAGE_WAIT:
2022                 /* ??? unimplemented */
2023                 break;
2024
2025         default:
2026                 ret = ANEG_FAILED;
2027                 break;
2028         }
2029
2030         return ret;
2031 }
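
/* Editor's note: ANEG_STATE_SETTLE_TIME is measured in calls to the state
 * machine above.  fiber_autoneg() below ticks cur_time once per iteration
 * with a udelay(1) between calls, so 10000 ticks is roughly a 10 ms settle
 * window inside an overall budget of about 195 ms (195000 iterations);
 * roughly, because the per-call execution time is not accounted for.
 */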
2032
2033 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2034 {
2035         int res = 0;
2036         struct tg3_fiber_aneginfo aninfo;
2037         int status = ANEG_FAILED;
2038         unsigned int tick;
2039         u32 tmp;
2040
2041         tw32_f(MAC_TX_AUTO_NEG, 0);
2042
2043         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2044         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2045         udelay(40);
2046
2047         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2048         udelay(40);
2049
2050         memset(&aninfo, 0, sizeof(aninfo));
2051         aninfo.flags |= MR_AN_ENABLE;
2052         aninfo.state = ANEG_STATE_UNKNOWN;
2053         aninfo.cur_time = 0;
2054         tick = 0;
2055         while (++tick < 195000) {
2056                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2057                 if (status == ANEG_DONE || status == ANEG_FAILED)
2058                         break;
2059
2060                 udelay(1);
2061         }
2062
2063         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2064         tw32_f(MAC_MODE, tp->mac_mode);
2065         udelay(40);
2066
2067         *flags = aninfo.flags;
2068
2069         if (status == ANEG_DONE &&
2070             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2071                              MR_LP_ADV_FULL_DUPLEX)))
2072                 res = 1;
2073
2074         return res;
2075 }
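
/* Editor's note: callers of fiber_autoneg() inspect only a few of the MR_*
 * bits returned in *flags.  The sketch below (hypothetical helper, not part
 * of the original driver) shows how the link partner's pause bits map onto
 * the LPA_* values passed to tg3_setup_flow_control() elsewhere in this
 * file.
 */
static u32 tg3_aneg_flags_to_lpa_sketch(u32 flags)
{
        u32 remote_adv = 0;

        if (flags & MR_LP_ADV_SYM_PAUSE)
                remote_adv |= LPA_PAUSE_CAP;
        if (flags & MR_LP_ADV_ASYM_PAUSE)
                remote_adv |= LPA_PAUSE_ASYM;

        return remote_adv;
}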
2076
2077 static void tg3_init_bcm8002(struct tg3 *tp)
2078 {
2079         u32 mac_status = tr32(MAC_STATUS);
2080         int i;
2081
2082         /* Reset when initting for the first time or when we have a link. */
2083         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2084             !(mac_status & MAC_STATUS_PCS_SYNCED))
2085                 return;
2086
2087         /* Set PLL lock range. */
2088         tg3_writephy(tp, 0x16, 0x8007);
2089
2090         /* SW reset */
2091         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2092
2093         /* Wait for reset to complete. */
2094         /* XXX schedule_timeout() ... */
2095         for (i = 0; i < 500; i++)
2096                 udelay(10);
2097
2098         /* Config mode; select PMA/Ch 1 regs. */
2099         tg3_writephy(tp, 0x10, 0x8411);
2100
2101         /* Enable auto-lock and comdet, select txclk for tx. */
2102         tg3_writephy(tp, 0x11, 0x0a10);
2103
2104         tg3_writephy(tp, 0x18, 0x00a0);
2105         tg3_writephy(tp, 0x16, 0x41ff);
2106
2107         /* Assert and deassert POR. */
2108         tg3_writephy(tp, 0x13, 0x0400);
2109         udelay(40);
2110         tg3_writephy(tp, 0x13, 0x0000);
2111
2112         tg3_writephy(tp, 0x11, 0x0a50);
2113         udelay(40);
2114         tg3_writephy(tp, 0x11, 0x0a10);
2115
2116         /* Wait for signal to stabilize */
2117         /* XXX schedule_timeout() ... */
2118         for (i = 0; i < 15000; i++)
2119                 udelay(10);
2120
2121         /* Deselect the channel register so we can read the PHYID
2122          * later.
2123          */
2124         tg3_writephy(tp, 0x10, 0x8011);
2125 }
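
/* Editor's note: the first delay loop above burns a flat ~5 ms rather than
 * polling for the PHY reset to complete.  A polling sketch of that wait is
 * shown below (hypothetical name, not part of the original driver); it
 * assumes the BCM8002 self-clears BMCR_RESET like a standard MII PHY.
 */
static int tg3_wait_phy_reset_sketch(struct tg3 *tp)
{
        u32 bmcr;
        int i;

        for (i = 0; i < 500; i++) {
                udelay(10);
                if (tg3_readphy(tp, MII_BMCR, &bmcr))
                        continue;
                if (!(bmcr & BMCR_RESET))
                        return 0;       /* reset has completed */
        }

        return -EBUSY;          /* still resetting after ~5 ms */
}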
2126
2127 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2128 {
2129         u32 sg_dig_ctrl, sg_dig_status;
2130         u32 serdes_cfg, expected_sg_dig_ctrl;
2131         int workaround, port_a;
2132         int current_link_up;
2133
2134         serdes_cfg = 0;
2135         expected_sg_dig_ctrl = 0;
2136         workaround = 0;
2137         port_a = 1;
2138         current_link_up = 0;
2139
2140         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2141             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2142                 workaround = 1;
2143                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2144                         port_a = 0;
2145
2146                 serdes_cfg = tr32(MAC_SERDES_CFG) &
2147                         ((1 << 23) | (1 << 22) | (1 << 21) | (1 << 20));
2148         }
2149
2150         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2151
2152         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2153                 if (sg_dig_ctrl & (1 << 31)) {
2154                         if (workaround) {
2155                                 u32 val = serdes_cfg;
2156
2157                                 if (port_a)
2158                                         val |= 0xc010880;
2159                                 else
2160                                         val |= 0x4010880;
2161                                 tw32_f(MAC_SERDES_CFG, val);
2162                         }
2163                         tw32_f(SG_DIG_CTRL, 0x01388400);
2164                 }
2165                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2166                         tg3_setup_flow_control(tp, 0, 0);
2167                         current_link_up = 1;
2168                 }
2169                 goto out;
2170         }
2171
2172         /* Want auto-negotiation.  */
2173         expected_sg_dig_ctrl = 0x81388400;
2174
2175         /* Pause capability */
2176         expected_sg_dig_ctrl |= (1 << 11);
2177
2178         /* Asymmetric pause */
2179         expected_sg_dig_ctrl |= (1 << 12);
2180
2181         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2182                 if (workaround)
2183                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011880);
2184                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2185                 udelay(5);
2186                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2187
2188                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2189         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2190                                  MAC_STATUS_SIGNAL_DET)) {
2191                 int i;
2192
2193                 /* Give time to negotiate (~200ms) */
2194                 for (i = 0; i < 40000; i++) {
2195                         sg_dig_status = tr32(SG_DIG_STATUS);
2196                         if (sg_dig_status & (0x3))
2197                                 break;
2198                         udelay(5);
2199                 }
2200                 mac_status = tr32(MAC_STATUS);
2201
2202                 if ((sg_dig_status & (1 << 1)) &&
2203                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2204                         u32 local_adv, remote_adv;
2205
2206                         local_adv = ADVERTISE_PAUSE_CAP;
2207                         remote_adv = 0;
2208                         if (sg_dig_status & (1 << 19))
2209                                 remote_adv |= LPA_PAUSE_CAP;
2210                         if (sg_dig_status & (1 << 20))
2211                                 remote_adv |= LPA_PAUSE_ASYM;
2212
2213                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2214                         current_link_up = 1;
2215                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2216                 } else if (!(sg_dig_status & (1 << 1))) {
2217                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2218                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2219                         else {
2220                                 if (workaround) {
2221                                         u32 val = serdes_cfg;
2222
2223                                         if (port_a)
2224                                                 val |= 0xc010880;
2225                                         else
2226                                                 val |= 0x4010880;
2227
2228                                         tw32_f(MAC_SERDES_CFG, val);
2229                                 }
2230
2231                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2232                                 udelay(40);
2233
2234                                 mac_status = tr32(MAC_STATUS);
2235                                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2236                                         tg3_setup_flow_control(tp, 0, 0);
2237                                         current_link_up = 1;
2238                                 }
2239                         }
2240                 }
2241         }
2242
2243 out:
2244         return current_link_up;
2245 }
2246
2247 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2248 {
2249         int current_link_up = 0;
2250
2251         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2252                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2253                 goto out;
2254         }
2255
2256         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2257                 u32 flags;
2258                 int i;
2259   
2260                 if (fiber_autoneg(tp, &flags)) {
2261                         u32 local_adv, remote_adv;
2262
2263                         local_adv = ADVERTISE_PAUSE_CAP;
2264                         remote_adv = 0;
2265                         if (flags & MR_LP_ADV_SYM_PAUSE)
2266                                 remote_adv |= LPA_PAUSE_CAP;
2267                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2268                                 remote_adv |= LPA_PAUSE_ASYM;
2269
2270                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2271
2272                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2273                         current_link_up = 1;
2274                 }
2275                 for (i = 0; i < 30; i++) {
2276                         udelay(20);
2277                         tw32_f(MAC_STATUS,
2278                                (MAC_STATUS_SYNC_CHANGED |
2279                                 MAC_STATUS_CFG_CHANGED));
2280                         udelay(40);
2281                         if ((tr32(MAC_STATUS) &
2282                              (MAC_STATUS_SYNC_CHANGED |
2283                               MAC_STATUS_CFG_CHANGED)) == 0)
2284                                 break;
2285                 }
2286
2287                 mac_status = tr32(MAC_STATUS);
2288                 if (current_link_up == 0 &&
2289                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2290                     !(mac_status & MAC_STATUS_RCVD_CFG))
2291                         current_link_up = 1;
2292         } else {
2293                 /* Forcing 1000FD link up. */
2294                 current_link_up = 1;
2295                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2296
2297                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2298                 udelay(40);
2299         }
2300
2301 out:
2302         return current_link_up;
2303 }
2304
2305 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2306 {
2307         u32 orig_pause_cfg;
2308         u16 orig_active_speed;
2309         u8 orig_active_duplex;
2310         u32 mac_status;
2311         int current_link_up;
2312         int i;
2313
2314         orig_pause_cfg =
2315                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2316                                   TG3_FLAG_TX_PAUSE));
2317         orig_active_speed = tp->link_config.active_speed;
2318         orig_active_duplex = tp->link_config.active_duplex;
2319
2320         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2321             netif_carrier_ok(tp->dev) &&
2322             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2323                 mac_status = tr32(MAC_STATUS);
2324                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2325                                MAC_STATUS_SIGNAL_DET |
2326                                MAC_STATUS_CFG_CHANGED |
2327                                MAC_STATUS_RCVD_CFG);
2328                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2329                                    MAC_STATUS_SIGNAL_DET)) {
2330                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2331                                             MAC_STATUS_CFG_CHANGED));
2332                         return 0;
2333                 }
2334         }
2335
2336         tw32_f(MAC_TX_AUTO_NEG, 0);
2337
2338         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2339         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2340         tw32_f(MAC_MODE, tp->mac_mode);
2341         udelay(40);
2342
2343         if (tp->phy_id == PHY_ID_BCM8002)
2344                 tg3_init_bcm8002(tp);
2345
2346         /* Enable link change event even when polling the serdes.  */
2347         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2348         udelay(40);
2349
2350         current_link_up = 0;
2351         mac_status = tr32(MAC_STATUS);
2352
2353         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2354                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2355         else
2356                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2357
2358         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2359         tw32_f(MAC_MODE, tp->mac_mode);
2360         udelay(40);
2361
2362         tp->hw_status->status =
2363                 (SD_STATUS_UPDATED |
2364                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2365
2366         for (i = 0; i < 100; i++) {
2367                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2368                                     MAC_STATUS_CFG_CHANGED));
2369                 udelay(5);
2370                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2371                                          MAC_STATUS_CFG_CHANGED)) == 0)
2372                         break;
2373         }
2374
2375         mac_status = tr32(MAC_STATUS);
2376         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2377                 current_link_up = 0;
2378                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2379                         tw32_f(MAC_MODE, (tp->mac_mode |
2380                                           MAC_MODE_SEND_CONFIGS));
2381                         udelay(1);
2382                         tw32_f(MAC_MODE, tp->mac_mode);
2383                 }
2384         }
2385
2386         if (current_link_up == 1) {
2387                 tp->link_config.active_speed = SPEED_1000;
2388                 tp->link_config.active_duplex = DUPLEX_FULL;
2389                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2390                                     LED_CTRL_LNKLED_OVERRIDE |
2391                                     LED_CTRL_1000MBPS_ON));
2392         } else {
2393                 tp->link_config.active_speed = SPEED_INVALID;
2394                 tp->link_config.active_duplex = DUPLEX_INVALID;
2395                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2396                                     LED_CTRL_LNKLED_OVERRIDE |
2397                                     LED_CTRL_TRAFFIC_OVERRIDE));
2398         }
2399
2400         if (current_link_up != netif_carrier_ok(tp->dev)) {
2401                 if (current_link_up)
2402                         netif_carrier_on(tp->dev);
2403                 else
2404                         netif_carrier_off(tp->dev);
2405                 tg3_link_report(tp);
2406         } else {
2407                 u32 now_pause_cfg =
2408                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2409                                          TG3_FLAG_TX_PAUSE);
2410                 if (orig_pause_cfg != now_pause_cfg ||
2411                     orig_active_speed != tp->link_config.active_speed ||
2412                     orig_active_duplex != tp->link_config.active_duplex)
2413                         tg3_link_report(tp);
2414         }
2415
2416         return 0;
2417 }
2418
2419 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2420 {
2421         int err;
2422
2423         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2424                 err = tg3_setup_fiber_phy(tp, force_reset);
2425         } else {
2426                 err = tg3_setup_copper_phy(tp, force_reset);
2427         }
2428
2429         if (tp->link_config.active_speed == SPEED_1000 &&
2430             tp->link_config.active_duplex == DUPLEX_HALF)
2431                 tw32(MAC_TX_LENGTHS,
2432                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2433                       (6 << TX_LENGTHS_IPG_SHIFT) |
2434                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2435         else
2436                 tw32(MAC_TX_LENGTHS,
2437                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2438                       (6 << TX_LENGTHS_IPG_SHIFT) |
2439                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2440
2441         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2442             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2443                 if (netif_carrier_ok(tp->dev)) {
2444                         tw32(HOSTCC_STAT_COAL_TICKS,
2445                              DEFAULT_STAT_COAL_TICKS);
2446                 } else {
2447                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2448                 }
2449         }
2450
2451         return err;
2452 }
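
/* Editor's note: the larger 0xff slot-time value programmed above for
 * 1000 Mb/s half duplex lines up with 802.3's extended slot time for
 * half-duplex gigabit (carrier extension: 4096 bit times instead of 512).
 * That the SLOT_TIME field counts in units of roughly 16 bit times is an
 * editorial assumption, not something stated in this file.
 */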
2453
2454 /* Tigon3 never reports partial packet sends.  So we do not
2455  * need special logic to handle SKBs that have not had all
2456  * of their frags sent yet, like SunGEM does.
2457  */
2458 static void tg3_tx(struct tg3 *tp)
2459 {
2460         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2461         u32 sw_idx = tp->tx_cons;
2462
2463         while (sw_idx != hw_idx) {
2464                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2465                 struct sk_buff *skb = ri->skb;
2466                 int i;
2467
2468                 if (unlikely(skb == NULL))
2469                         BUG();
2470
2471                 pci_unmap_single(tp->pdev,
2472                                  pci_unmap_addr(ri, mapping),
2473                                  skb_headlen(skb),
2474                                  PCI_DMA_TODEVICE);
2475
2476                 ri->skb = NULL;
2477
2478                 sw_idx = NEXT_TX(sw_idx);
2479
2480                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2481                         if (unlikely(sw_idx == hw_idx))
2482                                 BUG();
2483
2484                         ri = &tp->tx_buffers[sw_idx];
2485                         if (unlikely(ri->skb != NULL))
2486                                 BUG();
2487
2488                         pci_unmap_page(tp->pdev,
2489                                        pci_unmap_addr(ri, mapping),
2490                                        skb_shinfo(skb)->frags[i].size,
2491                                        PCI_DMA_TODEVICE);
2492
2493                         sw_idx = NEXT_TX(sw_idx);
2494                 }
2495
2496                 dev_kfree_skb_irq(skb);
2497         }
2498
2499         tp->tx_cons = sw_idx;
2500
2501         if (netif_queue_stopped(tp->dev) &&
2502             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2503                 netif_wake_queue(tp->dev);
2504 }
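
/* Editor's note: NEXT_TX() and TX_BUFFS_AVAIL(), used above, are defined
 * elsewhere in the driver.  The sketch below only illustrates the usual
 * power-of-two ring arithmetic such macros rely on (hypothetical names,
 * not part of the original driver, and assuming the ring size is a power
 * of two).
 */
static inline u32 tg3_ring_next_sketch(u32 idx, u32 ring_size)
{
        /* Advance and wrap without a modulo; only valid for power-of-two
         * ring sizes.
         */
        return (idx + 1) & (ring_size - 1);
}

static inline u32 tg3_ring_in_flight_sketch(u32 prod, u32 cons, u32 ring_size)
{
        /* Entries currently outstanding between producer and consumer. */
        return (prod - cons) & (ring_size - 1);
}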
2505
2506 /* Returns size of skb allocated or < 0 on error.
2507  *
2508  * We only need to fill in the address because the other members
2509  * of the RX descriptor are invariant; see tg3_init_rings.
2510  *
2511  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2512  * posting buffers we only dirty the first cache line of the RX
2513  * descriptor (containing the address), whereas for the RX status
2514  * buffers the cpu only reads the last cacheline of the RX descriptor
2515  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2516  */
2517 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2518                             int src_idx, u32 dest_idx_unmasked)
2519 {
2520         struct tg3_rx_buffer_desc *desc;
2521         struct ring_info *map, *src_map;
2522         struct sk_buff *skb;
2523         dma_addr_t mapping;
2524         int skb_size, dest_idx;
2525
2526         src_map = NULL;
2527         switch (opaque_key) {
2528         case RXD_OPAQUE_RING_STD:
2529                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2530                 desc = &tp->rx_std[dest_idx];
2531                 map = &tp->rx_std_buffers[dest_idx];
2532                 if (src_idx >= 0)
2533                         src_map = &tp->rx_std_buffers[src_idx];
2534                 skb_size = RX_PKT_BUF_SZ;
2535                 break;
2536
2537         case RXD_OPAQUE_RING_JUMBO:
2538                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2539                 desc = &tp->rx_jumbo[dest_idx];
2540                 map = &tp->rx_jumbo_buffers[dest_idx];
2541                 if (src_idx >= 0)
2542                         src_map = &tp->rx_jumbo_buffers[src_idx];
2543                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2544                 break;
2545
2546         default:
2547                 return -EINVAL;
2548         }
2549
2550         /* Do not overwrite any of the map or rp information
2551          * until we are sure we can commit to a new buffer.
2552          *
2553          * Callers depend upon this behavior and assume that
2554          * we leave everything unchanged if we fail.
2555          */
2556         skb = dev_alloc_skb(skb_size);
2557         if (skb == NULL)
2558                 return -ENOMEM;
2559
2560         skb->dev = tp->dev;
2561         skb_reserve(skb, tp->rx_offset);
2562
2563         mapping = pci_map_single(tp->pdev, skb->data,
2564                                  skb_size - tp->rx_offset,
2565                                  PCI_DMA_FROMDEVICE);
2566
2567         map->skb = skb;
2568         pci_unmap_addr_set(map, mapping, mapping);
2569
2570         if (src_map != NULL)
2571                 src_map->skb = NULL;
2572
2573         desc->addr_hi = ((u64)mapping >> 32);
2574         desc->addr_lo = ((u64)mapping & 0xffffffff);
2575
2576         return skb_size;
2577 }
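
/* Editor's note: a minimal sketch of how the standard producer ring could
 * be populated with the helper above, loosely modeled on the initial fill
 * performed by tg3_init_rings() (not part of this hunk).  Hypothetical
 * function name; error handling is reduced to stopping at the first
 * allocation failure.
 */
static u32 tg3_fill_std_ring_sketch(struct tg3 *tp, u32 count)
{
        u32 i;

        for (i = 0; i < count; i++) {
                /* src_idx of -1 means there is no buffer to recycle from. */
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0)
                        break;
        }

        return i;       /* number of descriptors actually armed */
}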
2578
2579 /* We only need to move over in the address because the other
2580  * members of the RX descriptor are invariant.  See notes above
2581  * tg3_alloc_rx_skb for full details.
2582  */
2583 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2584                            int src_idx, u32 dest_idx_unmasked)
2585 {
2586         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2587         struct ring_info *src_map, *dest_map;
2588         int dest_idx;
2589
2590         switch (opaque_key) {
2591         case RXD_OPAQUE_RING_STD:
2592                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2593                 dest_desc = &tp->rx_std[dest_idx];
2594                 dest_map = &tp->rx_std_buffers[dest_idx];
2595                 src_desc = &tp->rx_std[src_idx];
2596                 src_map = &tp->rx_std_buffers[src_idx];
2597                 break;
2598
2599         case RXD_OPAQUE_RING_JUMBO:
2600                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2601                 dest_desc = &tp->rx_jumbo[dest_idx];
2602                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2603                 src_desc = &tp->rx_jumbo[src_idx];
2604                 src_map = &tp->rx_jumbo_buffers[src_idx];
2605                 break;
2606
2607         default:
2608                 return;
2609         }
2610
2611         dest_map->skb = src_map->skb;
2612         pci_unmap_addr_set(dest_map, mapping,
2613                            pci_unmap_addr(src_map, mapping));
2614         dest_desc->addr_hi = src_desc->addr_hi;
2615         dest_desc->addr_lo = src_desc->addr_lo;
2616
2617         src_map->skb = NULL;
2618 }
2619
2620 #if TG3_VLAN_TAG_USED
2621 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2622 {
2623         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2624 }
2625 #endif
2626
2627 /* The RX ring scheme is composed of multiple rings which post fresh
2628  * buffers to the chip, and one special ring the chip uses to report
2629  * status back to the host.
2630  *
2631  * The special ring reports the status of received packets to the
2632  * host.  The chip does not write into the original descriptor the
2633  * RX buffer was obtained from.  The chip simply takes the original
2634  * descriptor as provided by the host, updates the status and length
2635  * field, then writes this into the next status ring entry.
2636  *
2637  * Each ring the host uses to post buffers to the chip is described
2638  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2639  * it is first placed into the on-chip ram.  When the packet's length
2640  * is known, it walks down the TG3_BDINFO entries to select the ring.
2641  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2642  * whose MAXLEN covers the new packet's length is chosen.
2643  *
2644  * The "separate ring for rx status" scheme may sound queer, but it makes
2645  * sense from a cache coherency perspective.  If only the host writes
2646  * to the buffer post rings, and only the chip writes to the rx status
2647  * rings, then cache lines never move beyond shared-modified state.
2648  * If both the host and chip were to write into the same ring, cache line
2649  * eviction could occur since both entities want it in an exclusive state.
2650  */
2651 static int tg3_rx(struct tg3 *tp, int budget)
2652 {
2653         u32 work_mask;
2654         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2655         u16 hw_idx, sw_idx;
2656         int received;
2657
2658         hw_idx = tp->hw_status->idx[0].rx_producer;
2659         /*
2660          * We need to order the read of hw_idx and the read of
2661          * the opaque cookie.
2662          */
2663         rmb();
2664         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2665         work_mask = 0;
2666         received = 0;
2667         while (sw_idx != hw_idx && budget > 0) {
2668                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2669                 unsigned int len;
2670                 struct sk_buff *skb;
2671                 dma_addr_t dma_addr;
2672                 u32 opaque_key, desc_idx, *post_ptr;
2673
2674                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2675                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2676                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2677                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2678                                                   mapping);
2679                         skb = tp->rx_std_buffers[desc_idx].skb;
2680                         post_ptr = &tp->rx_std_ptr;
2681                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2682                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2683                                                   mapping);
2684                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2685                         post_ptr = &tp->rx_jumbo_ptr;
2686                 }
2687                 else {
2688                         goto next_pkt_nopost;
2689                 }
2690
2691                 work_mask |= opaque_key;
2692
2693                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2694                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2695                 drop_it:
2696                         tg3_recycle_rx(tp, opaque_key,
2697                                        desc_idx, *post_ptr);
2698                 drop_it_no_recycle:
2699                         /* Other statistics kept track of by card. */
2700                         tp->net_stats.rx_dropped++;
2701                         goto next_pkt;
2702                 }
2703
2704                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2705
2706                 if (len > RX_COPY_THRESHOLD) {
2707                         int skb_size;
2708
2709                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2710                                                     desc_idx, *post_ptr);
2711                         if (skb_size < 0)
2712                                 goto drop_it;
2713
2714                         pci_unmap_single(tp->pdev, dma_addr,
2715                                          skb_size - tp->rx_offset,
2716                                          PCI_DMA_FROMDEVICE);
2717
2718                         skb_put(skb, len);
2719                 } else {
2720                         struct sk_buff *copy_skb;
2721
2722                         tg3_recycle_rx(tp, opaque_key,
2723                                        desc_idx, *post_ptr);
2724
2725                         copy_skb = dev_alloc_skb(len + 2);
2726                         if (copy_skb == NULL)
2727                                 goto drop_it_no_recycle;
2728
2729                         copy_skb->dev = tp->dev;
2730                         skb_reserve(copy_skb, 2);
2731                         skb_put(copy_skb, len);
2732                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2733                         memcpy(copy_skb->data, skb->data, len);
2734                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2735
2736                         /* We'll reuse the original ring buffer. */
2737                         skb = copy_skb;
2738                 }
2739
2740                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2741                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2742                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2743                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2744                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2745                 else
2746                         skb->ip_summed = CHECKSUM_NONE;
2747
2748                 skb->protocol = eth_type_trans(skb, tp->dev);
2749 #if TG3_VLAN_TAG_USED
2750                 if (tp->vlgrp != NULL &&
2751                     desc->type_flags & RXD_FLAG_VLAN) {
2752                         tg3_vlan_rx(tp, skb,
2753                                     desc->err_vlan & RXD_VLAN_MASK);
2754                 } else
2755 #endif
2756                         netif_receive_skb(skb);
2757
2758                 tp->dev->last_rx = jiffies;
2759                 received++;
2760                 budget--;
2761
2762 next_pkt:
2763                 (*post_ptr)++;
2764 next_pkt_nopost:
2765                 rx_rcb_ptr++;
2766                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2767         }
2768
2769         /* ACK the status ring. */
2770         tp->rx_rcb_ptr = rx_rcb_ptr;
2771         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2772                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2773
2774         /* Refill RX ring(s). */
2775         if (work_mask & RXD_OPAQUE_RING_STD) {
2776                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2777                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2778                              sw_idx);
2779         }
2780         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2781                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2782                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2783                              sw_idx);
2784         }
2785         mmiowb();
2786
2787         return received;
2788 }
2789
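/* NAPI poll callback (dev->poll).  Handles link-change events, reaps
 * completed TX descriptors, then processes up to min(*budget, dev->quota)
 * RX packets.  Returns 0 (and re-enables chip interrupts) once all work
 * is done, or 1 if the device should be polled again.
 */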
2790 static int tg3_poll(struct net_device *netdev, int *budget)
2791 {
2792         struct tg3 *tp = netdev_priv(netdev);
2793         struct tg3_hw_status *sblk = tp->hw_status;
2794         unsigned long flags;
2795         int done;
2796
2797         spin_lock_irqsave(&tp->lock, flags);
2798
2799         /* handle link change and other phy events */
2800         if (!(tp->tg3_flags &
2801               (TG3_FLAG_USE_LINKCHG_REG |
2802                TG3_FLAG_POLL_SERDES))) {
2803                 if (sblk->status & SD_STATUS_LINK_CHG) {
2804                         sblk->status = SD_STATUS_UPDATED |
2805                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2806                         tg3_setup_phy(tp, 0);
2807                 }
2808         }
2809
2810         /* run TX completion thread */
2811         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2812                 spin_lock(&tp->tx_lock);
2813                 tg3_tx(tp);
2814                 spin_unlock(&tp->tx_lock);
2815         }
2816
2817         spin_unlock_irqrestore(&tp->lock, flags);
2818
2819         /* run RX thread, within the bounds set by NAPI.
2820          * All RX "locking" is done by ensuring outside
2821          * code synchronizes with dev->poll()
2822          */
2823         done = 1;
2824         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2825                 int orig_budget = *budget;
2826                 int work_done;
2827
2828                 if (orig_budget > netdev->quota)
2829                         orig_budget = netdev->quota;
2830
2831                 work_done = tg3_rx(tp, orig_budget);
2832
2833                 *budget -= work_done;
2834                 netdev->quota -= work_done;
2835
2836                 if (work_done >= orig_budget)
2837                         done = 0;
2838         }
2839
2840         /* if no more work, tell net stack and NIC we're done */
2841         if (done) {
2842                 spin_lock_irqsave(&tp->lock, flags);
2843                 __netif_rx_complete(netdev);
2844                 tg3_restart_ints(tp);
2845                 spin_unlock_irqrestore(&tp->lock, flags);
2846         }
2847
2848         return (done ? 0 : 1);
2849 }
2850
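/* Cheap check used by the interrupt handler to decide whether scheduling
 * the NAPI poll is worthwhile: a pending link change, TX completion or
 * newly arrived RX packet all count as work.
 */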
2851 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2852 {
2853         struct tg3_hw_status *sblk = tp->hw_status;
2854         unsigned int work_exists = 0;
2855
2856         /* check for phy events */
2857         if (!(tp->tg3_flags &
2858               (TG3_FLAG_USE_LINKCHG_REG |
2859                TG3_FLAG_POLL_SERDES))) {
2860                 if (sblk->status & SD_STATUS_LINK_CHG)
2861                         work_exists = 1;
2862         }
2863         /* check for RX/TX work to do */
2864         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2865             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2866                 work_exists = 1;
2867
2868         return work_exists;
2869 }
2870
2871 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2872 {
2873         struct net_device *dev = dev_id;
2874         struct tg3 *tp = netdev_priv(dev);
2875         struct tg3_hw_status *sblk = tp->hw_status;
2876         unsigned long flags;
2877         unsigned int handled = 1;
2878
2879         spin_lock_irqsave(&tp->lock, flags);
2880
2881         if (sblk->status & SD_STATUS_UPDATED) {
2882                 /*
2883                  * writing any value to intr-mbox-0 clears PCI INTA# and
2884                  * chip-internal interrupt pending events.
2885                  * writing non-zero to intr-mbox-0 additionally tells the
2886                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2887                  * event coalescing.
2888                  */
2889                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2890                              0x00000001);
2891                 /*
2892                  * Flush PCI write.  This also guarantees that our
2893                  * status block has been flushed to host memory.
2894                  */
2895                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2896                 sblk->status &= ~SD_STATUS_UPDATED;
2897
2898                 if (likely(tg3_has_work(dev, tp)))
2899                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2900                 else {
2901                         /* no work, shared interrupt perhaps?  re-enable
2902                          * interrupts, and flush that PCI write
2903                          */
2904                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2905                                 0x00000000);
2906                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2907                 }
2908         } else {        /* shared interrupt */
2909                 handled = 0;
2910         }
2911
2912         spin_unlock_irqrestore(&tp->lock, flags);
2913
2914         return IRQ_RETVAL(handled);
2915 }
2916
2917 static int tg3_init_hw(struct tg3 *);
2918 static int tg3_halt(struct tg3 *);
2919
2920 #ifdef CONFIG_NET_POLL_CONTROLLER
2921 static void tg3_poll_controller(struct net_device *dev)
2922 {
2923         tg3_interrupt(dev->irq, dev, NULL);
2924 }
2925 #endif
2926
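/* Workqueue handler, scheduled from tg3_tx_timeout(): quiesce the interface,
 * fully reset and re-initialize the chip, then restart it.
 */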
2927 static void tg3_reset_task(void *_data)
2928 {
2929         struct tg3 *tp = _data;
2930         unsigned int restart_timer;
2931
2932         tg3_netif_stop(tp);
2933
2934         spin_lock_irq(&tp->lock);
2935         spin_lock(&tp->tx_lock);
2936
2937         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2938         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2939
2940         tg3_halt(tp);
2941         tg3_init_hw(tp);
2942
2943         tg3_netif_start(tp);
2944
2945         spin_unlock(&tp->tx_lock);
2946         spin_unlock_irq(&tp->lock);
2947
2948         if (restart_timer)
2949                 mod_timer(&tp->timer, jiffies + 1);
2950 }
2951
2952 static void tg3_tx_timeout(struct net_device *dev)
2953 {
2954         struct tg3 *tp = netdev_priv(dev);
2955
2956         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2957                dev->name);
2958
2959         schedule_work(&tp->reset_task);
2960 }
2961
2962 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2963
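/* Workaround for the 4GB-boundary DMA hardware bug: copy the offending skb
 * into a new linear skb, queue that copy as a single descriptor, then unmap
 * and release the ring entries that had already been set up for the
 * original fragments.
 */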
2964 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2965                                        u32 guilty_entry, int guilty_len,
2966                                        u32 last_plus_one, u32 *start, u32 mss)
2967 {
2968         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2969         dma_addr_t new_addr;
2970         u32 entry = *start;
2971         int i;
2972
2973         if (!new_skb) {
2974                 dev_kfree_skb(skb);
2975                 return -1;
2976         }
2977
2978         /* New SKB is guaranteed to be linear. */
2979         entry = *start;
2980         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2981                                   PCI_DMA_TODEVICE);
2982         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2983                     (skb->ip_summed == CHECKSUM_HW) ?
2984                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2985         *start = NEXT_TX(entry);
2986
2987         /* Now clean up the sw ring entries. */
2988         i = 0;
2989         while (entry != last_plus_one) {
2990                 int len;
2991
2992                 if (i == 0)
2993                         len = skb_headlen(skb);
2994                 else
2995                         len = skb_shinfo(skb)->frags[i-1].size;
2996                 pci_unmap_single(tp->pdev,
2997                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2998                                  len, PCI_DMA_TODEVICE);
2999                 if (i == 0) {
3000                         tp->tx_buffers[entry].skb = new_skb;
3001                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3002                 } else {
3003                         tp->tx_buffers[entry].skb = NULL;
3004                 }
3005                 entry = NEXT_TX(entry);
3006         }
3007
3008         dev_kfree_skb(skb);
3009
3010         return 0;
3011 }
3012
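/* Fill in one host TX descriptor.  Bit 0 of mss_and_is_end flags the last
 * descriptor of the frame; the remaining bits carry the TSO MSS.
 */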
3013 static void tg3_set_txd(struct tg3 *tp, int entry,
3014                         dma_addr_t mapping, int len, u32 flags,
3015                         u32 mss_and_is_end)
3016 {
3017         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3018         int is_end = (mss_and_is_end & 0x1);
3019         u32 mss = (mss_and_is_end >> 1);
3020         u32 vlan_tag = 0;
3021
3022         if (is_end)
3023                 flags |= TXD_FLAG_END;
3024         if (flags & TXD_FLAG_VLAN) {
3025                 vlan_tag = flags >> 16;
3026                 flags &= 0xffff;
3027         }
3028         vlan_tag |= (mss << TXD_MSS_SHIFT);
3029
3030         txd->addr_hi = ((u64) mapping >> 32);
3031         txd->addr_lo = ((u64) mapping & 0xffffffff);
3032         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3033         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3034 }
3035
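/* Test whether the low 32 bits of a DMA mapping would wrap within
 * len + 8 bytes, i.e. the buffer straddles a 4GB boundary.  Such mappings
 * trip a hardware bug and are rerouted through
 * tigon3_4gb_hwbug_workaround().
 */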
3036 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3037 {
3038         u32 base = (u32) mapping & 0xffffffff;
3039
3040         return ((base > 0xffffdcc0) &&
3041                 (base + len + 8 < base));
3042 }
3043
3044 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3045 {
3046         struct tg3 *tp = netdev_priv(dev);
3047         dma_addr_t mapping;
3048         unsigned int i;
3049         u32 len, entry, base_flags, mss;
3050         int would_hit_hwbug;
3051         unsigned long flags;
3052
3053         len = skb_headlen(skb);
3054
3055         /* No BH disabling for tx_lock here.  We are running in BH disabled
3056          * context and TX reclaim runs via tp->poll inside of a software
3057          * interrupt.  Rejoice!
3058          *
3059          * Actually, things are not so simple.  If we are to take a hw
3060          * IRQ here, we can deadlock, consider:
3061          *
3062          *       CPU1           CPU2
3063          *   tg3_start_xmit
3064          *   take tp->tx_lock
3065          *                      tg3_timer
3066          *                      take tp->lock
3067          *   tg3_interrupt
3068          *   spin on tp->lock
3069          *                      spin on tp->tx_lock
3070          *
3071          * So we really do need to disable interrupts when taking
3072          * tx_lock here.
3073          */
3074         local_irq_save(flags);
3075         if (!spin_trylock(&tp->tx_lock)) { 
3076                 local_irq_restore(flags);
3077                 return NETDEV_TX_LOCKED; 
3078         } 
3079
3080         /* This is a hard error, log it. */
3081         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3082                 netif_stop_queue(dev);
3083                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3084                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3085                        dev->name);
3086                 return NETDEV_TX_BUSY;
3087         }
3088
3089         entry = tp->tx_prod;
3090         base_flags = 0;
3091         if (skb->ip_summed == CHECKSUM_HW)
3092                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
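        /* When TSO is compiled in and the stack hands us a segmentation
         * offload frame (tso_size != 0), the block below primes the headers
         * for the chip: the IP checksum is zeroed, the TCP checksum is
         * seeded with the pseudo-header sum (minus the length), and the
         * IP/TCP header lengths are folded into mss / base_flags.
         */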
3093 #if TG3_TSO_SUPPORT != 0
3094         mss = 0;
3095         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3096             (mss = skb_shinfo(skb)->tso_size) != 0) {
3097                 int tcp_opt_len, ip_tcp_len;
3098
3099                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3100                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3101
3102                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3103                                TXD_FLAG_CPU_POST_DMA);
3104
3105                 skb->nh.iph->check = 0;
3106                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3107                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
3108                                                       skb->nh.iph->daddr,
3109                                                       0, IPPROTO_TCP, 0);
3110
3111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3112                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3113                                 int tsflags;
3114
3115                                 tsflags = ((skb->nh.iph->ihl - 5) +
3116                                            (tcp_opt_len >> 2));
3117                                 mss |= (tsflags << 11);
3118                         }
3119                 } else {
3120                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3121                                 int tsflags;
3122
3123                                 tsflags = ((skb->nh.iph->ihl - 5) +
3124                                            (tcp_opt_len >> 2));
3125                                 base_flags |= tsflags << 12;
3126                         }
3127                 }
3128         }
3129 #else
3130         mss = 0;
3131 #endif
3132 #if TG3_VLAN_TAG_USED
3133         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3134                 base_flags |= (TXD_FLAG_VLAN |
3135                                (vlan_tx_tag_get(skb) << 16));
3136 #endif
3137
3138         /* Queue skb data, a.k.a. the main skb fragment. */
3139         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3140
3141         tp->tx_buffers[entry].skb = skb;
3142         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3143
3144         would_hit_hwbug = 0;
3145
3146         if (tg3_4g_overflow_test(mapping, len))
3147                 would_hit_hwbug = entry + 1;
3148
3149         tg3_set_txd(tp, entry, mapping, len, base_flags,
3150                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3151
3152         entry = NEXT_TX(entry);
3153
3154         /* Now loop through additional data fragments, and queue them. */
3155         if (skb_shinfo(skb)->nr_frags > 0) {
3156                 unsigned int i, last;
3157
3158                 last = skb_shinfo(skb)->nr_frags - 1;
3159                 for (i = 0; i <= last; i++) {
3160                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3161
3162                         len = frag->size;
3163                         mapping = pci_map_page(tp->pdev,
3164                                                frag->page,
3165                                                frag->page_offset,
3166                                                len, PCI_DMA_TODEVICE);
3167
3168                         tp->tx_buffers[entry].skb = NULL;
3169                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3170
3171                         if (tg3_4g_overflow_test(mapping, len)) {
3172                                 /* Only one should match. */
3173                                 if (would_hit_hwbug)
3174                                         BUG();
3175                                 would_hit_hwbug = entry + 1;
3176                         }
3177
3178                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3179                                 tg3_set_txd(tp, entry, mapping, len,
3180                                             base_flags, (i == last)|(mss << 1));
3181                         else
3182                                 tg3_set_txd(tp, entry, mapping, len,
3183                                             base_flags, (i == last));
3184
3185                         entry = NEXT_TX(entry);
3186                 }
3187         }
3188
3189         if (would_hit_hwbug) {
3190                 u32 last_plus_one = entry;
3191                 u32 start;
3192                 unsigned int len = 0;
3193
3194                 would_hit_hwbug -= 1;
3195                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3196                 entry &= (TG3_TX_RING_SIZE - 1);
3197                 start = entry;
3198                 i = 0;
3199                 while (entry != last_plus_one) {
3200                         if (i == 0)
3201                                 len = skb_headlen(skb);
3202                         else
3203                                 len = skb_shinfo(skb)->frags[i-1].size;
3204
3205                         if (entry == would_hit_hwbug)
3206                                 break;
3207
3208                         i++;
3209                         entry = NEXT_TX(entry);
3210
3211                 }
3212
3213                 /* If the workaround fails due to memory/mapping
3214                  * failure, silently drop this packet.
3215                  */
3216                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3217                                                 entry, len,
3218                                                 last_plus_one,
3219                                                 &start, mss))
3220                         goto out_unlock;
3221
3222                 entry = start;
3223         }
3224
3225         /* Packets are ready, update Tx producer idx, both locally and on the card. */
3226         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3227
3228         tp->tx_prod = entry;
3229         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3230                 netif_stop_queue(dev);
3231
3232 out_unlock:
3233         mmiowb();
3234         spin_unlock_irqrestore(&tp->tx_lock, flags);
3235
3236         dev->trans_start = jiffies;
3237
3238         return NETDEV_TX_OK;
3239 }
3240
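/* Record the new MTU and flip the jumbo-frame flag accordingly; when the
 * interface is running, tg3_change_mtu() below also halts and
 * re-initializes the chip so the new size takes effect.
 */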
3241 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3242                                int new_mtu)
3243 {
3244         dev->mtu = new_mtu;
3245
3246         if (new_mtu > ETH_DATA_LEN)
3247                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3248         else
3249                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3250 }
3251
3252 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3253 {
3254         struct tg3 *tp = netdev_priv(dev);
3255
3256         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3257                 return -EINVAL;
3258
3259         if (!netif_running(dev)) {
3260                 /* We'll just catch it later when the
3261                  * device is brought up.
3262                  */
3263                 tg3_set_mtu(dev, tp, new_mtu);
3264                 return 0;
3265         }
3266
3267         tg3_netif_stop(tp);
3268         spin_lock_irq(&tp->lock);
3269         spin_lock(&tp->tx_lock);
3270
3271         tg3_halt(tp);
3272
3273         tg3_set_mtu(dev, tp, new_mtu);
3274
3275         tg3_init_hw(tp);
3276
3277         tg3_netif_start(tp);
3278
3279         spin_unlock(&tp->tx_lock);
3280         spin_unlock_irq(&tp->lock);
3281
3282         return 0;
3283 }
3284
3285 /* Free up pending packets in all rx/tx rings.
3286  *
3287  * The chip has been shut down and the driver detached from
3288  * the networking stack, so no interrupts or new tx packets will
3289  * end up in the driver.  tp->{tx,}lock is not held and we are not
3290  * in an interrupt context and thus may sleep.
3291  */
3292 static void tg3_free_rings(struct tg3 *tp)
3293 {
3294         struct ring_info *rxp;
3295         int i;
3296
3297         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3298                 rxp = &tp->rx_std_buffers[i];
3299
3300                 if (rxp->skb == NULL)
3301                         continue;
3302                 pci_unmap_single(tp->pdev,
3303                                  pci_unmap_addr(rxp, mapping),
3304                                  RX_PKT_BUF_SZ - tp->rx_offset,
3305                                  PCI_DMA_FROMDEVICE);
3306                 dev_kfree_skb_any(rxp->skb);
3307                 rxp->skb = NULL;
3308         }
3309
3310         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3311                 rxp = &tp->rx_jumbo_buffers[i];
3312
3313                 if (rxp->skb == NULL)
3314                         continue;
3315                 pci_unmap_single(tp->pdev,
3316                                  pci_unmap_addr(rxp, mapping),
3317                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3318                                  PCI_DMA_FROMDEVICE);
3319                 dev_kfree_skb_any(rxp->skb);
3320                 rxp->skb = NULL;
3321         }
3322
3323         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3324                 struct tx_ring_info *txp;
3325                 struct sk_buff *skb;
3326                 int j;
3327
3328                 txp = &tp->tx_buffers[i];
3329                 skb = txp->skb;
3330
3331                 if (skb == NULL) {
3332                         i++;
3333                         continue;
3334                 }
3335
3336                 pci_unmap_single(tp->pdev,
3337                                  pci_unmap_addr(txp, mapping),
3338                                  skb_headlen(skb),
3339                                  PCI_DMA_TODEVICE);
3340                 txp->skb = NULL;
3341
3342                 i++;
3343
3344                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3345                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3346                         pci_unmap_page(tp->pdev,
3347                                        pci_unmap_addr(txp, mapping),
3348                                        skb_shinfo(skb)->frags[j].size,
3349                                        PCI_DMA_TODEVICE);
3350                         i++;
3351                 }
3352
3353                 dev_kfree_skb_any(skb);
3354         }
3355 }
3356
3357 /* Initialize tx/rx rings for packet processing.
3358  *
3359  * The chip has been shut down and the driver detached from
3360  * the networking stack, so no interrupts or new tx packets will
3361  * end up in the driver.  tp->{tx,}lock are held and thus
3362  * we may not sleep.
3363  */
3364 static void tg3_init_rings(struct tg3 *tp)
3365 {
3366         u32 i;
3367
3368         /* Free up all the SKBs. */
3369         tg3_free_rings(tp);
3370
3371         /* Zero out all descriptors. */
3372         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3373         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3374         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3375         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3376
3377         /* Initialize invariants of the rings; we only set this
3378          * stuff once.  This works because the card does not
3379          * write into the rx buffer posting rings.
3380          */
3381         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3382                 struct tg3_rx_buffer_desc *rxd;
3383
3384                 rxd = &tp->rx_std[i];
3385                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3386                         << RXD_LEN_SHIFT;
3387                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3388                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3389                                (i << RXD_OPAQUE_INDEX_SHIFT));
3390         }
3391
3392         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3393                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3394                         struct tg3_rx_buffer_desc *rxd;
3395
3396                         rxd = &tp->rx_jumbo[i];
3397                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3398                                 << RXD_LEN_SHIFT;
3399                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3400                                 RXD_FLAG_JUMBO;
3401                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3402                                (i << RXD_OPAQUE_INDEX_SHIFT));
3403                 }
3404         }
3405
3406         /* Now allocate fresh SKBs for each rx ring. */
3407         for (i = 0; i < tp->rx_pending; i++) {
3408                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3409                                      -1, i) < 0)
3410                         break;
3411         }
3412
3413         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3414                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3415                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3416                                              -1, i) < 0)
3417                                 break;
3418                 }
3419         }
3420 }
3421
3422 /*
3423  * Must not be invoked with interrupt sources disabled and
3424  * the hardware shut down.
3425  */
3426 static void tg3_free_consistent(struct tg3 *tp)
3427 {
3428         if (tp->rx_std_buffers) {
3429                 kfree(tp->rx_std_buffers);
3430                 tp->rx_std_buffers = NULL;
3431         }
3432         if (tp->rx_std) {
3433                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3434                                     tp->rx_std, tp->rx_std_mapping);
3435                 tp->rx_std = NULL;
3436         }
3437         if (tp->rx_jumbo) {
3438                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3439                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3440                 tp->rx_jumbo = NULL;
3441         }
3442         if (tp->rx_rcb) {
3443                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3444                                     tp->rx_rcb, tp->rx_rcb_mapping);
3445                 tp->rx_rcb = NULL;
3446         }
3447         if (tp->tx_ring) {
3448                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3449                         tp->tx_ring, tp->tx_desc_mapping);
3450                 tp->tx_ring = NULL;
3451         }
3452         if (tp->hw_status) {
3453                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3454                                     tp->hw_status, tp->status_mapping);
3455                 tp->hw_status = NULL;
3456         }
3457         if (tp->hw_stats) {
3458                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3459                                     tp->hw_stats, tp->stats_mapping);
3460                 tp->hw_stats = NULL;
3461         }
3462 }
3463
3464 /*
3465  * Must not be invoked with interrupt sources disabled and
3466  * the hardware shut down.  Can sleep.
3467  */
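/* One kmalloc covers the software ring_info arrays for the standard RX,
 * jumbo RX and TX rings; the descriptor rings, status block and statistics
 * block are then allocated as DMA-coherent memory.
 */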
3468 static int tg3_alloc_consistent(struct tg3 *tp)
3469 {
3470         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3471                                       (TG3_RX_RING_SIZE +
3472                                        TG3_RX_JUMBO_RING_SIZE)) +
3473                                      (sizeof(struct tx_ring_info) *
3474                                       TG3_TX_RING_SIZE),
3475                                      GFP_KERNEL);
3476         if (!tp->rx_std_buffers)
3477                 return -ENOMEM;
3478
3479         memset(tp->rx_std_buffers, 0,
3480                (sizeof(struct ring_info) *
3481                 (TG3_RX_RING_SIZE +
3482                  TG3_RX_JUMBO_RING_SIZE)) +
3483                (sizeof(struct tx_ring_info) *
3484                 TG3_TX_RING_SIZE));
3485
3486         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3487         tp->tx_buffers = (struct tx_ring_info *)
3488                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3489
3490         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3491                                           &tp->rx_std_mapping);
3492         if (!tp->rx_std)
3493                 goto err_out;
3494
3495         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3496                                             &tp->rx_jumbo_mapping);
3497
3498         if (!tp->rx_jumbo)
3499                 goto err_out;
3500
3501         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3502                                           &tp->rx_rcb_mapping);
3503         if (!tp->rx_rcb)
3504                 goto err_out;
3505
3506         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3507                                            &tp->tx_desc_mapping);
3508         if (!tp->tx_ring)
3509                 goto err_out;
3510
3511         tp->hw_status = pci_alloc_consistent(tp->pdev,
3512                                              TG3_HW_STATUS_SIZE,
3513                                              &tp->status_mapping);
3514         if (!tp->hw_status)
3515                 goto err_out;
3516
3517         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3518                                             sizeof(struct tg3_hw_stats),
3519                                             &tp->stats_mapping);
3520         if (!tp->hw_stats)
3521                 goto err_out;
3522
3523         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3524         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3525
3526         return 0;
3527
3528 err_out:
3529         tg3_free_consistent(tp);
3530         return -ENOMEM;
3531 }
3532
3533 #define MAX_WAIT_CNT 1000
3534
3535 /* To stop a block, clear the enable bit and poll till it
3536  * clears.  tp->lock is held.
3537  */
3538 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3539 {
3540         unsigned int i;
3541         u32 val;
3542
3543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3545                 switch (ofs) {
3546                 case RCVLSC_MODE:
3547                 case DMAC_MODE:
3548                 case MBFREE_MODE:
3549                 case BUFMGR_MODE:
3550                 case MEMARB_MODE:
3551                         /* We can't enable/disable these bits of the
3552                          * 5705/5750; just say success.
3553                          */
3554                         return 0;
3555
3556                 default:
3557                         break;
3558                 };
3559         }
3560
3561         val = tr32(ofs);
3562         val &= ~enable_bit;
3563         tw32_f(ofs, val);
3564
3565         for (i = 0; i < MAX_WAIT_CNT; i++) {
3566                 udelay(100);
3567                 val = tr32(ofs);
3568                 if ((val & enable_bit) == 0)
3569                         break;
3570         }
3571
3572         if (i == MAX_WAIT_CNT) {
3573                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3574                        "ofs=%lx enable_bit=%x\n",
3575                        ofs, enable_bit);
3576                 return -ENODEV;
3577         }
3578
3579         return 0;
3580 }
3581
3582 /* tp->lock is held. */
3583 static int tg3_abort_hw(struct tg3 *tp)
3584 {
3585         int i, err;
3586
3587         tg3_disable_ints(tp);
3588
3589         tp->rx_mode &= ~RX_MODE_ENABLE;
3590         tw32_f(MAC_RX_MODE, tp->rx_mode);
3591         udelay(10);
3592
3593         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3594         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3595         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3596         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3597         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3598         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3599
3600         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3601         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3602         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3603         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3604         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3605         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3606         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3607         if (err)
3608                 goto out;
3609
3610         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3611         tw32_f(MAC_MODE, tp->mac_mode);
3612         udelay(40);
3613
3614         tp->tx_mode &= ~TX_MODE_ENABLE;
3615         tw32_f(MAC_TX_MODE, tp->tx_mode);
3616
3617         for (i = 0; i < MAX_WAIT_CNT; i++) {
3618                 udelay(100);
3619                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3620                         break;
3621         }
3622         if (i >= MAX_WAIT_CNT) {
3623                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3624                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3625                        tp->dev->name, tr32(MAC_TX_MODE));
3626                 return -ENODEV;
3627         }
3628
3629         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3630         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3631         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3632
3633         tw32(FTQ_RESET, 0xffffffff);
3634         tw32(FTQ_RESET, 0x00000000);
3635
3636         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3637         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3638         if (err)
3639                 goto out;
3640
3641         if (tp->hw_status)
3642                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3643         if (tp->hw_stats)
3644                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3645
3646 out:
3647         return err;
3648 }
3649
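/* Request the NVRAM software arbitration grant (SWARB_REQ_SET1) and poll
 * for it, giving up after 8000 * 20us; the grant is released again in
 * tg3_nvram_unlock().
 */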
3650 /* tp->lock is held. */
3651 static int tg3_nvram_lock(struct tg3 *tp)
3652 {
3653         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3654                 int i;
3655
3656                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3657                 for (i = 0; i < 8000; i++) {
3658                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3659                                 break;
3660                         udelay(20);
3661                 }
3662                 if (i == 8000)
3663                         return -ENODEV;
3664         }
3665         return 0;
3666 }
3667
3668 /* tp->lock is held. */
3669 static void tg3_nvram_unlock(struct tg3 *tp)
3670 {
3671         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3672                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3673 }
3674
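/* The signature helpers below post the driver's state (start / unload /
 * suspend) into firmware mailboxes, so that the on-chip management (ASF)
 * firmware can follow what the host driver is doing across a reset.
 */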
3675 /* tp->lock is held. */
3676 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3677 {
3678         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3679                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3680
3681         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3682                 switch (kind) {
3683                 case RESET_KIND_INIT:
3684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3685                                       DRV_STATE_START);
3686                         break;
3687
3688                 case RESET_KIND_SHUTDOWN:
3689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3690                                       DRV_STATE_UNLOAD);
3691                         break;
3692
3693                 case RESET_KIND_SUSPEND:
3694                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3695                                       DRV_STATE_SUSPEND);
3696                         break;
3697
3698                 default:
3699                         break;
3700                 };
3701         }
3702 }
3703
3704 /* tp->lock is held. */
3705 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3706 {
3707         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3708                 switch (kind) {
3709                 case RESET_KIND_INIT:
3710                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3711                                       DRV_STATE_START_DONE);
3712                         break;
3713
3714                 case RESET_KIND_SHUTDOWN:
3715                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3716                                       DRV_STATE_UNLOAD_DONE);
3717                         break;
3718
3719                 default:
3720                         break;
3721                 };
3722         }
3723 }
3724
3725 /* tp->lock is held. */
3726 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3727 {
3728         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3729                 switch (kind) {
3730                 case RESET_KIND_INIT:
3731                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3732                                       DRV_STATE_START);
3733                         break;
3734
3735                 case RESET_KIND_SHUTDOWN:
3736                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3737                                       DRV_STATE_UNLOAD);
3738                         break;
3739
3740                 case RESET_KIND_SUSPEND:
3741                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3742                                       DRV_STATE_SUSPEND);
3743                         break;
3744
3745                 default:
3746                         break;
3747                 };
3748         }
3749 }
3750
3751 static void tg3_stop_fw(struct tg3 *);
3752
3753 /* tp->lock is held. */
3754 static int tg3_chip_reset(struct tg3 *tp)
3755 {
3756         u32 val;
3757         u32 flags_save;
3758         int i;
3759
3760         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3761                 tg3_nvram_lock(tp);
3762
3763         /*
3764          * We must avoid the readl() that normally takes place.
3765          * It locks machines, causes machine checks, and other
3766          * fun things.  So, temporarily disable the 5701
3767          * hardware workaround, while we do the reset.
3768          */
3769         flags_save = tp->tg3_flags;
3770         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3771
3772         /* do the reset */
3773         val = GRC_MISC_CFG_CORECLK_RESET;
3774
3775         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3776                 if (tr32(0x7e2c) == 0x60) {
3777                         tw32(0x7e2c, 0x20);
3778                 }
3779                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3780                         tw32(GRC_MISC_CFG, (1 << 29));
3781                         val |= (1 << 29);
3782                 }
3783         }
3784
3785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3786             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3787                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3788         tw32(GRC_MISC_CFG, val);
3789
3790         /* restore 5701 hardware bug workaround flag */
3791         tp->tg3_flags = flags_save;
3792
3793         /* Unfortunately, we have to delay before the PCI read back.
3794          * Some 575X chips will not even respond to a PCI cfg access
3795          * when the reset command is given to the chip.
3796          *
3797          * How do these hardware designers expect things to work
3798          * properly if the PCI write is posted for a long period
3799          * of time?  It is always necessary to have some method by
3800  * which a register read back can occur to push out the write
3801  * that does the reset.
3802          *
3803          * For most tg3 variants the trick below was working.
3804          * Ho hum...
3805          */
3806         udelay(120);
3807
3808         /* Flush PCI posted writes.  The normal MMIO registers
3809          * are inaccessible at this time so this is the only
3810          * way to make this reliably (actually, this is no longer
3811  * way to do this reliably (actually, this is no longer
3812          * register read/write but this upset some 5701 variants.
3813          */
3814         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3815
3816         udelay(120);
3817
3818         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3819                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3820                         int i;
3821                         u32 cfg_val;
3822
3823                         /* Wait for link training to complete.  */
3824                         for (i = 0; i < 5000; i++)
3825                                 udelay(100);
3826
3827                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3828                         pci_write_config_dword(tp->pdev, 0xc4,
3829                                                cfg_val | (1 << 15));
3830                 }
3831                 /* Set PCIE max payload size and clear error status.  */
3832                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3833         }
3834
3835         /* Re-enable indirect register accesses. */
3836         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3837                                tp->misc_host_ctrl);
3838
3839         /* Set MAX PCI retry to zero. */
3840         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3841         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3842             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3843                 val |= PCISTATE_RETRY_SAME_DMA;
3844         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3845
3846         pci_restore_state(tp->pdev);
3847
3848         /* Make sure PCI-X relaxed ordering bit is clear. */
3849         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3850         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3851         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3852
3853         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3854
3855         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3856                 tg3_stop_fw(tp);
3857                 tw32(0x5000, 0x400);
3858         }
3859
3860         tw32(GRC_MODE, tp->grc_mode);
3861
3862         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3863                 u32 val = tr32(0xc4);
3864
3865                 tw32(0xc4, val | (1 << 15));
3866         }
3867
3868         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3869             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3870                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3871                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3872                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3873                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3874         }
3875
3876         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3877                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3878                 tw32_f(MAC_MODE, tp->mac_mode);
3879         } else
3880                 tw32_f(MAC_MODE, 0);
3881         udelay(40);
3882
3883         /* Wait for firmware initialization to complete. */
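        /* The firmware acknowledges by writing back the one's complement
         * of the magic value that was posted before the reset.
         */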
3884         for (i = 0; i < 100000; i++) {
3885                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3886                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3887                         break;
3888                 udelay(10);
3889         }
3890         if (i >= 100000 &&
3891             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3892                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3893                        "firmware will not restart magic=%08x\n",
3894                        tp->dev->name, val);
3895                 return -ENODEV;
3896         }
3897
3898         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3899             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3900                 u32 val = tr32(0x7c00);
3901
3902                 tw32(0x7c00, val | (1 << 25));
3903         }
3904
3905         /* Reprobe ASF enable state.  */
3906         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3907         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3908         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3909         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3910                 u32 nic_cfg;
3911
3912                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3913                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3914                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3915                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3916                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3917                 }
3918         }
3919
3920         return 0;
3921 }
3922
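/* Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the
 * command mailbox, raise the RX CPU event bit, and wait up to 100us for
 * the firmware to acknowledge by clearing that bit.
 */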
3923 /* tp->lock is held. */
3924 static void tg3_stop_fw(struct tg3 *tp)
3925 {
3926         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3927                 u32 val;
3928                 int i;
3929
3930                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3931                 val = tr32(GRC_RX_CPU_EVENT);
3932                 val |= (1 << 14);
3933                 tw32(GRC_RX_CPU_EVENT, val);
3934
3935                 /* Wait for RX cpu to ACK the event.  */
3936                 for (i = 0; i < 100; i++) {
3937                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3938                                 break;
3939                         udelay(1);
3940                 }
3941         }
3942 }
3943
3944 /* tp->lock is held. */
3945 static int tg3_halt(struct tg3 *tp)
3946 {
3947         int err;
3948
3949         tg3_stop_fw(tp);
3950
3951         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3952
3953         tg3_abort_hw(tp);
3954         err = tg3_chip_reset(tp);
3955
3956         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3957         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3958
3959         if (err)
3960                 return err;
3961
3962         return 0;
3963 }
3964
3965 #define TG3_FW_RELEASE_MAJOR    0x0
3966 #define TG3_FW_RELASE_MINOR     0x0
3967 #define TG3_FW_RELEASE_FIX      0x0
3968 #define TG3_FW_START_ADDR       0x08000000
3969 #define TG3_FW_TEXT_ADDR        0x08000000
3970 #define TG3_FW_TEXT_LEN         0x9c0
3971 #define TG3_FW_RODATA_ADDR      0x080009c0
3972 #define TG3_FW_RODATA_LEN       0x60
3973 #define TG3_FW_DATA_ADDR        0x08000a40
3974 #define TG3_FW_DATA_LEN         0x20
3975 #define TG3_FW_SBSS_ADDR        0x08000a60
3976 #define TG3_FW_SBSS_LEN         0xc
3977 #define TG3_FW_BSS_ADDR         0x08000a70
3978 #define TG3_FW_BSS_LEN          0x10
3979
3980 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3981         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3982         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3983         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3984         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3985         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3986         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3987         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3988         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3989         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3990         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3991         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3992         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3993         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3994         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3995         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3996         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3997         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3998         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3999         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4000         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4001         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4002         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4003         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4004         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4005         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4006         0, 0, 0, 0, 0, 0,
4007         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4008         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4009         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4010         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4011         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4012         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4013         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4014         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4015         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4016         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4017         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4018         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4019         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4020         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4021         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4022         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4023         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4024         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4025         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4026         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4027         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4028         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4029         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4030         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4031         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4032         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4033         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4034         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4035         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4036         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4037         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4038         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4039         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4040         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4041         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4042         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4043         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4044         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4045         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4046         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4047         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4048         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4049         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4050         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4051         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4052         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4053         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4054         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4055         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4056         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4057         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4058         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4059         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4060         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4061         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4062         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4063         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4064         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4065         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4066         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4067         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4068         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4069         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4070         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4071         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4072 };
4073
4074 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4075         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4076         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4077         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4078         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4079         0x00000000
4080 };
4081
4082 #if 0 /* All zeros, don't eat up space with it. */
4083 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4084         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4085         0x00000000, 0x00000000, 0x00000000, 0x00000000
4086 };
4087 #endif
4088
4089 #define RX_CPU_SCRATCH_BASE     0x30000
4090 #define RX_CPU_SCRATCH_SIZE     0x04000
4091 #define TX_CPU_SCRATCH_BASE     0x34000
4092 #define TX_CPU_SCRATCH_SIZE     0x04000
4093
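/* Halt the on-chip RX or TX MIPS CPU by repeatedly writing CPU_MODE_HALT
 * until the CPU mode register reports the halt has taken effect.
 */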
4094 /* tp->lock is held. */
4095 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4096 {
4097         int i;
4098
4099         if (offset == TX_CPU_BASE &&
4100             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4101                 BUG();
4102
4103         if (offset == RX_CPU_BASE) {
4104                 for (i = 0; i < 10000; i++) {
4105                         tw32(offset + CPU_STATE, 0xffffffff);
4106                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4107                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4108                                 break;
4109                 }
4110
4111                 tw32(offset + CPU_STATE, 0xffffffff);
4112                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4113                 udelay(10);
4114         } else {
4115                 for (i = 0; i < 10000; i++) {
4116                         tw32(offset + CPU_STATE, 0xffffffff);
4117                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4118                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4119                                 break;
4120                 }
4121         }
4122
4123         if (i >= 10000) {
4124                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4125                        "%s CPU\n",
4126                        tp->dev->name,
4127                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4128                 return -ENODEV;
4129         }
4130         return 0;
4131 }
4132
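/* Describes one firmware image.  The *_base fields are addresses in the
 * internal CPU's address space, the *_len fields are byte counts, and a
 * NULL *_data pointer means that section is loaded as all zeros.
 */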
4133 struct fw_info {
4134         unsigned int text_base;
4135         unsigned int text_len;
4136         u32 *text_data;
4137         unsigned int rodata_base;
4138         unsigned int rodata_len;
4139         u32 *rodata_data;
4140         unsigned int data_base;
4141         unsigned int data_len;
4142         u32 *data_data;
4143 };
4144
4145 /* tp->lock is held. */
4146 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4147                                  int cpu_scratch_size, struct fw_info *info)
4148 {
4149         int err, i;
4150         u32 orig_tg3_flags = tp->tg3_flags;
4151         void (*write_op)(struct tg3 *, u32, u32);
4152
4153         if (cpu_base == TX_CPU_BASE &&
4154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4155                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4156                        "TX CPU firmware on %s, which is a 5705.\n",
4157                        tp->dev->name);
4158                 return -EINVAL;
4159         }
4160
4161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4162                 write_op = tg3_write_mem;
4163         else
4164                 write_op = tg3_write_indirect_reg32;
4165
4166         /* Force use of PCI config space for indirect register
4167          * write calls.
4168          */
4169         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4170
4171         err = tg3_halt_cpu(tp, cpu_base);
4172         if (err)
4173                 goto out;
4174
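        /* Zero the whole scratch area, keep the CPU halted, then copy each
         * section (text, rodata, data) into scratch at the offset given by
         * the low 16 bits of its link-time base address.
         */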
4175         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4176                 write_op(tp, cpu_scratch_base + i, 0);
4177         tw32(cpu_base + CPU_STATE, 0xffffffff);
4178         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4179         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4180                 write_op(tp, (cpu_scratch_base +
4181                               (info->text_base & 0xffff) +
4182                               (i * sizeof(u32))),
4183                          (info->text_data ?
4184                           info->text_data[i] : 0));
4185         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4186                 write_op(tp, (cpu_scratch_base +
4187                               (info->rodata_base & 0xffff) +
4188                               (i * sizeof(u32))),
4189                          (info->rodata_data ?
4190                           info->rodata_data[i] : 0));
4191         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4192                 write_op(tp, (cpu_scratch_base +
4193                               (info->data_base & 0xffff) +
4194                               (i * sizeof(u32))),
4195                          (info->data_data ?
4196                           info->data_data[i] : 0));
4197
4198         err = 0;
4199
4200 out:
4201         tp->tg3_flags = orig_tg3_flags;
4202         return err;
4203 }
4204
4205 /* tp->lock is held. */
4206 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4207 {
4208         struct fw_info info;
4209         int err, i;
4210
4211         info.text_base = TG3_FW_TEXT_ADDR;
4212         info.text_len = TG3_FW_TEXT_LEN;
4213         info.text_data = &tg3FwText[0];
4214         info.rodata_base = TG3_FW_RODATA_ADDR;
4215         info.rodata_len = TG3_FW_RODATA_LEN;
4216         info.rodata_data = &tg3FwRodata[0];
4217         info.data_base = TG3_FW_DATA_ADDR;
4218         info.data_len = TG3_FW_DATA_LEN;
4219         info.data_data = NULL;
4220
4221         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4222                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4223                                     &info);
4224         if (err)
4225                 return err;
4226
4227         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4228                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4229                                     &info);
4230         if (err)
4231                 return err;
4232
4233         /* Now start up only the RX CPU. */
4234         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4235         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4236
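        /* Verify that the PC write took effect; retry up to five times,
         * re-halting the CPU and re-writing the PC on each attempt.
         */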
4237         for (i = 0; i < 5; i++) {
4238                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4239                         break;
4240                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4241                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4242                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4243                 udelay(1000);
4244         }
4245         if (i >= 5) {
4246                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX CPU PC "
4247                        "for %s: is %08x, should be %08x\n",
4248                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4249                        TG3_FW_TEXT_ADDR);
4250                 return -ENODEV;
4251         }
4252         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4253         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4254
4255         return 0;
4256 }
4257
4258 #if TG3_TSO_SUPPORT != 0
4259
4260 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4261 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4262 #define TG3_TSO_FW_RELEASE_FIX          0x0
4263 #define TG3_TSO_FW_START_ADDR           0x08000000
4264 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4265 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4266 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4267 #define TG3_TSO_FW_RODATA_LEN           0x60
4268 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4269 #define TG3_TSO_FW_DATA_LEN             0x30
4270 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4271 #define TG3_TSO_FW_SBSS_LEN             0x2c
4272 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4273 #define TG3_TSO_FW_BSS_LEN              0x894
4274
4275 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4276         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4277         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4278         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4279         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4280         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4281         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4282         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4283         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4284         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4285         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4286         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4287         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4288         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4289         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4290         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4291         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4292         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4293         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4294         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4295         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4296         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4297         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4298         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4299         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4300         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4301         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4302         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4303         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4304         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4305         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4306         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4307         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4308         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4309         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4310         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4311         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4312         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4313         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4314         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4315         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4316         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4317         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4318         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4319         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4320         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4321         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4322         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4323         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4324         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4325         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4326         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4327         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4328         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4329         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4330         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4331         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4332         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4333         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4334         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4335         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4336         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4337         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4338         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4339         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4340         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4341         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4342         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4343         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4344         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4345         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4346         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4347         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4348         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4349         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4350         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4351         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4352         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4353         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4354         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4355         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4356         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4357         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4358         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4359         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4360         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4361         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4362         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4363         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4364         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4365         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4366         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4367         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4368         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4369         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4370         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4371         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4372         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4373         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4374         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4375         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4376         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4377         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4378         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4379         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4380         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4381         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4382         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4383         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4384         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4385         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4386         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4387         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4388         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4389         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4390         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4391         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4392         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4393         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4394         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4395         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4396         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4397         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4398         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4399         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4400         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4401         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4402         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4403         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4404         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4405         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4406         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4407         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4408         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4409         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4410         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4411         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4412         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4413         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4414         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4415         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4416         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4417         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4418         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4419         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4420         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4421         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4422         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4423         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4424         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4425         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4426         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4427         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4428         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4429         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4430         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4431         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4432         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4433         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4434         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4435         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4436         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4437         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4438         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4439         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4440         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4441         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4442         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4443         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4444         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4445         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4446         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4447         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4448         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4449         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4450         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4451         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4452         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4453         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4454         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4455         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4456         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4457         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4458         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4459         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4460         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4461         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4462         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4463         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4464         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4465         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4466         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4467         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4468         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4469         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4470         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4471         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4472         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4473         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4474         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4475         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4476         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4477         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4478         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4479         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4480         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4481         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4482         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4483         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4484         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4485         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4486         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4487         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4488         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4489         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4490         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4491         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4492         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4493         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4494         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4495         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4496         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4497         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4498         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4499         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4500         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4501         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4502         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4503         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4504         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4505         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4506         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4507         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4508         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4509         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4510         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4511         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4512         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4513         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4514         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4515         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4516         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4517         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4518         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4519         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4520         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4521         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4522         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4523         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4524         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4525         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4526         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4527         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4528         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4529         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4530         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4531         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4532         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4533         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4534         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4535         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4536         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4537         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4538         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4539         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4540         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4541         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4542         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4543         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4544         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4545         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4546         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4547         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4548         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4549         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4550         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4551         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4552         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4553         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4554         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4555         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4556         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4557         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4558         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4559         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4560 };
4561
4562 static u32 tg3TsoFwRodata[] = {
4563         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4564         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4565         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4566         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4567         0x00000000,
4568 };
4569
4570 static u32 tg3TsoFwData[] = {
4571         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4572         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4573         0x00000000,
4574 };
4575
4576 /* 5705 needs a special version of the TSO firmware.  */
4577 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4578 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4579 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4580 #define TG3_TSO5_FW_START_ADDR          0x00010000
4581 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4582 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4583 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4584 #define TG3_TSO5_FW_RODATA_LEN          0x50
4585 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4586 #define TG3_TSO5_FW_DATA_LEN            0x20
4587 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4588 #define TG3_TSO5_FW_SBSS_LEN            0x28
4589 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4590 #define TG3_TSO5_FW_BSS_LEN             0x88
4591
4592 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4593         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4594         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4595         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4596         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4597         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4598         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4599         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4600         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4601         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4602         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4603         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4604         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4605         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4606         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4607         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4608         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4609         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4610         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4611         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4612         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4613         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4614         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4615         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4616         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4617         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4618         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4619         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4620         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4621         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4622         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4623         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4624         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4625         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4626         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4627         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4628         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4629         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4630         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4631         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4632         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4633         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4634         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4635         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4636         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4637         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4638         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4639         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4640         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4641         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4642         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4643         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4644         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4645         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4646         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4647         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4648         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4649         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4650         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4651         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4652         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4653         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4654         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4655         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4656         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4657         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4658         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4659         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4660         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4661         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4662         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4663         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4664         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4665         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4666         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4667         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4668         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4669         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4670         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4671         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4672         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4673         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4674         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4675         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4676         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4677         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4678         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4679         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4680         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4681         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4682         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4683         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4684         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4685         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4686         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4687         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4688         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4689         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4690         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4691         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4692         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4693         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4694         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4695         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4696         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4697         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4698         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4699         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4700         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4701         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4702         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4703         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4704         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4705         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4706         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4707         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4708         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4709         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4710         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4711         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4712         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4713         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4714         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4715         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4716         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4717         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4718         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4719         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4720         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4721         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4722         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4723         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4724         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4725         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4726         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4727         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4728         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4729         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4730         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4731         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4732         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4733         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4734         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4735         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4736         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4737         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4738         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4739         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4740         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4741         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4742         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4743         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4744         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4745         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4746         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4747         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4748         0x00000000, 0x00000000, 0x00000000,
4749 };
4750
4751 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4752         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4753         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4754         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4755         0x00000000, 0x00000000, 0x00000000,
4756 };
4757
4758 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4759         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4760         0x00000000, 0x00000000, 0x00000000,
4761 };
4762
4763 /* tp->lock is held. */
4764 static int tg3_load_tso_firmware(struct tg3 *tp)
4765 {
4766         struct fw_info info;
4767         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4768         int err, i;
4769
4770         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4771                 return 0;
4772
4773         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4774                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4775                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4776                 info.text_data = &tg3Tso5FwText[0];
4777                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4778                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4779                 info.rodata_data = &tg3Tso5FwRodata[0];
4780                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4781                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4782                 info.data_data = &tg3Tso5FwData[0];
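                /* On 5705 the RX CPU runs the TSO firmware out of the start
                 * of the MBUF pool SRAM; tg3_reset_hw() reserves a matching
                 * (128-byte rounded) chunk of that pool for it.
                 */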
4783                 cpu_base = RX_CPU_BASE;
4784                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4785                 cpu_scratch_size = (info.text_len +
4786                                     info.rodata_len +
4787                                     info.data_len +
4788                                     TG3_TSO5_FW_SBSS_LEN +
4789                                     TG3_TSO5_FW_BSS_LEN);
4790         } else {
4791                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4792                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4793                 info.text_data = &tg3TsoFwText[0];
4794                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4795                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4796                 info.rodata_data = &tg3TsoFwRodata[0];
4797                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4798                 info.data_len = TG3_TSO_FW_DATA_LEN;
4799                 info.data_data = &tg3TsoFwData[0];
4800                 cpu_base = TX_CPU_BASE;
4801                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4802                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4803         }
4804
4805         err = tg3_load_firmware_cpu(tp, cpu_base,
4806                                     cpu_scratch_base, cpu_scratch_size,
4807                                     &info);
4808         if (err)
4809                 return err;
4810
4811         /* Now start up the CPU. */
4812         tw32(cpu_base + CPU_STATE, 0xffffffff);
4813         tw32_f(cpu_base + CPU_PC,    info.text_base);
4814
4815         for (i = 0; i < 5; i++) {
4816                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4817                         break;
4818                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4819                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4820                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4821                 udelay(1000);
4822         }
4823         if (i >= 5) {
4824                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
4825                        "for %s: is %08x, should be %08x\n",
4826                        tp->dev->name, tr32(cpu_base + CPU_PC),
4827                        info.text_base);
4828                 return -ENODEV;
4829         }
4830         tw32(cpu_base + CPU_STATE, 0xffffffff);
4831         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4832         return 0;
4833 }
4834
4835 #endif /* TG3_TSO_SUPPORT != 0 */
4836
4837 /* tp->lock is held. */
4838 static void __tg3_set_mac_addr(struct tg3 *tp)
4839 {
4840         u32 addr_high, addr_low;
4841         int i;
4842
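        /* Program the station address as a 16-bit high half and a 32-bit
         * low half; the same address is written to all four MAC address
         * slots.
         */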
4843         addr_high = ((tp->dev->dev_addr[0] << 8) |
4844                      tp->dev->dev_addr[1]);
4845         addr_low = ((tp->dev->dev_addr[2] << 24) |
4846                     (tp->dev->dev_addr[3] << 16) |
4847                     (tp->dev->dev_addr[4] <<  8) |
4848                     (tp->dev->dev_addr[5] <<  0));
4849         for (i = 0; i < 4; i++) {
4850                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4851                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4852         }
4853
4854         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4855             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4856             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4857                 for (i = 0; i < 12; i++) {
4858                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4859                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4860                 }
4861         }
4862
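        /* Derive the transmit backoff seed from a byte-sum of the MAC
         * address (masked to TX_BACKOFF_SEED_MASK), which should keep
         * stations with different addresses from picking identical
         * backoff patterns.
         */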
4863         addr_high = (tp->dev->dev_addr[0] +
4864                      tp->dev->dev_addr[1] +
4865                      tp->dev->dev_addr[2] +
4866                      tp->dev->dev_addr[3] +
4867                      tp->dev->dev_addr[4] +
4868                      tp->dev->dev_addr[5]) &
4869                 TX_BACKOFF_SEED_MASK;
4870         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4871 }
4872
4873 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4874 {
4875         struct tg3 *tp = netdev_priv(dev);
4876         struct sockaddr *addr = p;
4877
4878         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4879
4880         spin_lock_irq(&tp->lock);
4881         __tg3_set_mac_addr(tp);
4882         spin_unlock_irq(&tp->lock);
4883
4884         return 0;
4885 }
4886
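/* Write one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA address of
 * the ring (split into high and low words), the maxlen/flags word and,
 * except on the 5705, the NIC-side address of the descriptors.
 */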
4887 /* tp->lock is held. */
4888 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4889                            dma_addr_t mapping, u32 maxlen_flags,
4890                            u32 nic_addr)
4891 {
4892         tg3_write_mem(tp,
4893                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4894                       ((u64) mapping >> 32));
4895         tg3_write_mem(tp,
4896                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4897                       ((u64) mapping & 0xffffffff));
4898         tg3_write_mem(tp,
4899                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4900                        maxlen_flags);
4901
4902         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4903                 tg3_write_mem(tp,
4904                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4905                               nic_addr);
4906 }
4907
4908 static void __tg3_set_rx_mode(struct net_device *);
4909
4910 /* tp->lock is held. */
4911 static int tg3_reset_hw(struct tg3 *tp)
4912 {
4913         u32 val, rdmac_mode;
4914         int i, err, limit;
4915
4916         tg3_disable_ints(tp);
4917
4918         tg3_stop_fw(tp);
4919
4920         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4921
4922         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4923                 err = tg3_abort_hw(tp);
4924                 if (err)
4925                         return err;
4926         }
4927
4928         err = tg3_chip_reset(tp);
4929         if (err)
4930                 return err;
4931
4932         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4933
4934         /* This works around an issue with Athlon chipsets on
4935          * B3 tigon3 silicon.  This bit has no effect on any
4936          * other revision.  But do not set this on PCI Express
4937          * chips.
4938          */
4939         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4940                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4941         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4942
4943         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4944             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4945                 val = tr32(TG3PCI_PCISTATE);
4946                 val |= PCISTATE_RETRY_SAME_DMA;
4947                 tw32(TG3PCI_PCISTATE, val);
4948         }
4949
4950         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4951                 /* Enable some hw fixes.  */
4952                 val = tr32(TG3PCI_MSI_DATA);
4953                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4954                 tw32(TG3PCI_MSI_DATA, val);
4955         }
4956
4957         /* Descriptor ring init may make accesses to the
4958          * NIC SRAM area to setup the TX descriptors, so we
4959          * can only do this after the hardware has been
4960          * successfully reset.
4961          */
4962         tg3_init_rings(tp);
4963
4964         /* This value is determined during the probe time DMA
4965          * engine test, tg3_test_dma.
4966          */
4967         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4968
4969         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4970                           GRC_MODE_4X_NIC_SEND_RINGS |
4971                           GRC_MODE_NO_TX_PHDR_CSUM |
4972                           GRC_MODE_NO_RX_PHDR_CSUM);
4973         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4974         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4975                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4976         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4977                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4978
4979         tw32(GRC_MODE,
4980              tp->grc_mode |
4981              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4982
4983         /* Set up the timer prescaler register.  The clock is always 66 MHz; the value 65 presumably selects divide-by-66 for a 1 MHz tick. */
4984         val = tr32(GRC_MISC_CFG);
4985         val &= ~0xff;
4986         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4987         tw32(GRC_MISC_CFG, val);
4988
4989         /* Initialize MBUF/DESC pool. */
4990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4991                 /* Do nothing.  */
4992         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4993                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4994                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4995                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4996                 else
4997                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4998                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4999                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5000         }
5001 #if TG3_TSO_SUPPORT != 0
5002         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5003                 int fw_len;
5004
5005                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5006                           TG3_TSO5_FW_RODATA_LEN +
5007                           TG3_TSO5_FW_DATA_LEN +
5008                           TG3_TSO5_FW_SBSS_LEN +
5009                           TG3_TSO5_FW_BSS_LEN);
5010                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
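                     /* Round the 5705 TSO firmware footprint up to a 128-byte
                      * boundary, then offset and shrink the mbuf pool by that
                      * amount so the pool and the firmware image do not overlap.
                      */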
5011                 tw32(BUFMGR_MB_POOL_ADDR,
5012                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5013                 tw32(BUFMGR_MB_POOL_SIZE,
5014                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5015         }
5016 #endif
5017
5018         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5019                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5020                      tp->bufmgr_config.mbuf_read_dma_low_water);
5021                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5022                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5023                 tw32(BUFMGR_MB_HIGH_WATER,
5024                      tp->bufmgr_config.mbuf_high_water);
5025         } else {
5026                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5027                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5028                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5029                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5030                 tw32(BUFMGR_MB_HIGH_WATER,
5031                      tp->bufmgr_config.mbuf_high_water_jumbo);
5032         }
5033         tw32(BUFMGR_DMA_LOW_WATER,
5034              tp->bufmgr_config.dma_low_water);
5035         tw32(BUFMGR_DMA_HIGH_WATER,
5036              tp->bufmgr_config.dma_high_water);
5037
5038         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
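             /* Poll for the buffer manager to report itself enabled: 2000
              * iterations of udelay(10) bounds the wait at roughly 20 ms.
              */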
5039         for (i = 0; i < 2000; i++) {
5040                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5041                         break;
5042                 udelay(10);
5043         }
5044         if (i >= 2000) {
5045                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5046                        tp->dev->name);
5047                 return -ENODEV;
5048         }
5049
5050         /* Setup replenish threshold. */
5051         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5052
5053         /* Initialize the TG3_BDINFOs at:
5054          *  RCVDBDI_STD_BD:     standard eth size rx ring
5055          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5056          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5057          *
5058          * like so:
5059          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5060          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5061          *                              ring attribute flags
5062          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5063          *
5064          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5065          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5066          *
5067          * The size of each ring is fixed in the firmware, but the location is
5068          * configurable.
5069          */
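             /* Only the host and NIC addresses of the standard ring are set
              * here; TG3_BDINFO_MAXLEN_FLAGS is programmed further below
              * because 5705/5750 use a smaller maximum buffer size
              * (RX_STD_MAX_SIZE_5705).
              */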
5070         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5071              ((u64) tp->rx_std_mapping >> 32));
5072         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5073              ((u64) tp->rx_std_mapping & 0xffffffff));
5074         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5075              NIC_SRAM_RX_BUFFER_DESC);
5076
5077         /* Don't even try to program the JUMBO/MINI buffer descriptor
5078          * configs on the 5705 and 5750.
5079          */
5080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5081             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5082                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5083                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5084         } else {
5085                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5086                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5087
5088                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5089                      BDINFO_FLAGS_DISABLED);
5090
5091                 /* Setup replenish threshold. */
5092                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5093
5094                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5095                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5096                              ((u64) tp->rx_jumbo_mapping >> 32));
5097                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5098                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5099                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5100                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5101                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5102                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5103                 } else {
5104                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5105                              BDINFO_FLAGS_DISABLED);
5106                 }
5107
5108         }
5109
5110         /* There is only one send ring on 5705/5750, no need to explicitly
5111          * disable the others.
5112          */
5113         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5114             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5115                 /* Clear out send RCB ring in SRAM. */
5116                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5117                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5118                                       BDINFO_FLAGS_DISABLED);
5119         }
5120
5121         tp->tx_prod = 0;
5122         tp->tx_cons = 0;
5123         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5124         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5125
5126         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5127                        tp->tx_desc_mapping,
5128                        (TG3_TX_RING_SIZE <<
5129                         BDINFO_FLAGS_MAXLEN_SHIFT),
5130                        NIC_SRAM_TX_BUFFER_DESC);
5131
5132         /* There is only one receive return ring on 5705/5750, no need
5133          * to explicitly disable the others.
5134          */
5135         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5137                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5138                      i += TG3_BDINFO_SIZE) {
5139                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5140                                       BDINFO_FLAGS_DISABLED);
5141                 }
5142         }
5143
5144         tp->rx_rcb_ptr = 0;
5145         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5146
5147         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5148                        tp->rx_rcb_mapping,
5149                        (TG3_RX_RCB_RING_SIZE(tp) <<
5150                         BDINFO_FLAGS_MAXLEN_SHIFT),
5151                        0);
5152
5153         tp->rx_std_ptr = tp->rx_pending;
5154         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5155                      tp->rx_std_ptr);
5156
5157         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5158                                                 tp->rx_jumbo_pending : 0;
5159         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5160                      tp->rx_jumbo_ptr);
5161
5162         /* Initialize MAC address and backoff seed. */
5163         __tg3_set_mac_addr(tp);
5164
5165         /* MTU + Ethernet header (ETH_HLEN) + FCS (4) + optional VLAN tag (4) */
5166         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5167
5168         /* The slot time is changed by tg3_setup_phy if we
5169          * run at gigabit with half duplex.
5170          */
5171         tw32(MAC_TX_LENGTHS,
5172              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5173              (6 << TX_LENGTHS_IPG_SHIFT) |
5174              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5175
5176         /* Receive rules. */
5177         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5178         tw32(RCVLPC_CONFIG, 0x0181);
5179
5180         /* Calculate the RDMAC_MODE setting early; we need it to determine
5181          * the RCVLPC_STATS_ENABLE mask.
5182          */
5183         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5184                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5185                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5186                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5187                       RDMAC_MODE_LNGREAD_ENAB);
5188         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5189                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5190         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5191              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5192             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5193                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5194                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5195                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5196                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5197                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5198                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5199                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5200                 }
5201         }
5202
5203 #if TG3_TSO_SUPPORT != 0
5204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5205                 rdmac_mode |= (1 << 27);
5206 #endif
5207
5208         /* Receive/send statistics. */
5209         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5210             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5211                 val = tr32(RCVLPC_STATS_ENABLE);
5212                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5213                 tw32(RCVLPC_STATS_ENABLE, val);
5214         } else {
5215                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5216         }
5217         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5218         tw32(SNDDATAI_STATSENAB, 0xffffff);
5219         tw32(SNDDATAI_STATSCTRL,
5220              (SNDDATAI_SCTRL_ENABLE |
5221               SNDDATAI_SCTRL_FASTUPD));
5222
5223         /* Setup host coalescing engine. */
5224         tw32(HOSTCC_MODE, 0);
5225         for (i = 0; i < 2000; i++) {
5226                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5227                         break;
5228                 udelay(10);
5229         }
5230
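             /* Default host coalescing parameters: with zero rx ticks and an
              * rx max-frames count of 1, the chip effectively posts a status
              * update for every received frame; TX uses the LOW_* defaults.
              */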
5231         tw32(HOSTCC_RXCOL_TICKS, 0);
5232         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5233         tw32(HOSTCC_RXMAX_FRAMES, 1);
5234         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5235         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5236             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5237                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5238                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5239         }
5240         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5241         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5242
5243         /* set status block DMA address */
5244         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5245              ((u64) tp->status_mapping >> 32));
5246         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5247              ((u64) tp->status_mapping & 0xffffffff));
5248
5249         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5250             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5251                 /* Status/statistics block address.  See tg3_timer,
5252                  * the tg3_periodic_fetch_stats call there, and
5253                  * tg3_get_stats to see how this works for 5705/5750 chips.
5254                  */
5255                 tw32(HOSTCC_STAT_COAL_TICKS,
5256                      DEFAULT_STAT_COAL_TICKS);
5257                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5258                      ((u64) tp->stats_mapping >> 32));
5259                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5260                      ((u64) tp->stats_mapping & 0xffffffff));
5261                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5262                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5263         }
5264
5265         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5266
5267         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5268         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5269         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5270             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5271                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5272
5273         /* Clear statistics/status block in chip, and status block in ram. */
5274         for (i = NIC_SRAM_STATS_BLK;
5275              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5276              i += sizeof(u32)) {
5277                 tg3_write_mem(tp, i, 0);
5278                 udelay(40);
5279         }
5280         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5281
5282         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5283                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5284         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5285         udelay(40);
5286
5287         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5289                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5290                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5291         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5292         udelay(100);
5293
5294         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5295         tr32(MAILBOX_INTERRUPT_0);
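             /* Zero the interrupt mailbox and read it back to flush the posted
              * write before the DMA engines are enabled below.
              */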
5296
5297         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5298             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5299                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5300                 udelay(40);
5301         }
5302
5303         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5304                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5305                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5306                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5307                WDMAC_MODE_LNGREAD_ENAB);
5308
5309         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5310              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5311             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5312                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5313                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5314                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5315                         /* nothing */
5316                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5317                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5318                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5319                         val |= WDMAC_MODE_RX_ACCEL;
5320                 }
5321         }
5322
5323         tw32_f(WDMAC_MODE, val);
5324         udelay(40);
5325
5326         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5327                 val = tr32(TG3PCI_X_CAPS);
5328                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5329                         val &= ~PCIX_CAPS_BURST_MASK;
5330                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5331                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5332                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5333                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5334                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5335                                 val |= (tp->split_mode_max_reqs <<
5336                                         PCIX_CAPS_SPLIT_SHIFT);
5337                 }
5338                 tw32(TG3PCI_X_CAPS, val);
5339         }
5340
5341         tw32_f(RDMAC_MODE, rdmac_mode);
5342         udelay(40);
5343
5344         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5345         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5346             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5347                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5348         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5349         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5350         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5351         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5352         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5353 #if TG3_TSO_SUPPORT != 0
5354         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5355                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5356 #endif
5357         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5358         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5359
5360         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5361                 err = tg3_load_5701_a0_firmware_fix(tp);
5362                 if (err)
5363                         return err;
5364         }
5365
5366 #if TG3_TSO_SUPPORT != 0
5367         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5368                 err = tg3_load_tso_firmware(tp);
5369                 if (err)
5370                         return err;
5371         }
5372 #endif
5373
5374         tp->tx_mode = TX_MODE_ENABLE;
5375         tw32_f(MAC_TX_MODE, tp->tx_mode);
5376         udelay(100);
5377
5378         tp->rx_mode = RX_MODE_ENABLE;
5379         tw32_f(MAC_RX_MODE, tp->rx_mode);
5380         udelay(10);
5381
5382         if (tp->link_config.phy_is_low_power) {
5383                 tp->link_config.phy_is_low_power = 0;
5384                 tp->link_config.speed = tp->link_config.orig_speed;
5385                 tp->link_config.duplex = tp->link_config.orig_duplex;
5386                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5387         }
5388
5389         tp->mi_mode = MAC_MI_MODE_BASE;
5390         tw32_f(MAC_MI_MODE, tp->mi_mode);
5391         udelay(80);
5392
5393         tw32(MAC_LED_CTRL, tp->led_ctrl);
5394
5395         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5396         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5397                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5398                 udelay(10);
5399         }
5400         tw32_f(MAC_RX_MODE, tp->rx_mode);
5401         udelay(10);
5402
5403         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5404                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5405                         /* Set drive transmission level to 1.2V  */
5406                         val = tr32(MAC_SERDES_CFG);
5407                         val &= 0xfffff000;
5408                         val |= 0x880;
5409                         tw32(MAC_SERDES_CFG, val);
5410                 }
5411                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5412                         tw32(MAC_SERDES_CFG, 0x616000);
5413         }
5414
5415         /* Prevent chip from dropping frames when flow control
5416          * is enabled.
5417          */
5418         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5419
5420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5421             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5422                 /* Use hardware link auto-negotiation */
5423                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5424         }
5425
5426         err = tg3_setup_phy(tp, 1);
5427         if (err)
5428                 return err;
5429
5430         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5431                 u32 tmp;
5432
5433                 /* Clear CRC stats. */
5434                 tg3_readphy(tp, 0x1e, &tmp);
5435                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5436                 tg3_readphy(tp, 0x14, &tmp);
5437         }
5438
5439         __tg3_set_rx_mode(tp->dev);
5440
5441         /* Initialize receive rules. */
5442         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5443         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5444         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5445         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5446
5447         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5448             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5449                 limit = 8;
5450         else
5451                 limit = 16;
5452         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5453                 limit -= 4;
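             /* The switch below falls through deliberately: starting at `limit',
              * every receive rule/value pair from limit - 1 down to 4 is cleared.
              * Rules 0 and 1 were programmed above; when ASF is enabled the top
              * four rules are skipped, presumably reserved for the firmware.
              */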
5454         switch (limit) {
5455         case 16:
5456                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5457         case 15:
5458                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5459         case 14:
5460                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5461         case 13:
5462                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5463         case 12:
5464                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5465         case 11:
5466                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5467         case 10:
5468                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5469         case 9:
5470                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5471         case 8:
5472                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5473         case 7:
5474                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5475         case 6:
5476                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5477         case 5:
5478                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5479         case 4:
5480                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5481         case 3:
5482                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5483         case 2:
5484         case 1:
5485
5486         default:
5487                 break;
5488         }
5489
5490         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5491
5492         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5493                 tg3_enable_ints(tp);
5494
5495         return 0;
5496 }
5497
5498 /* Called at device open time to get the chip ready for
5499  * packet processing.  Invoked with tp->lock held.
5500  */
5501 static int tg3_init_hw(struct tg3 *tp)
5502 {
5503         int err;
5504
5505         /* Force the chip into D0. */
5506         err = tg3_set_power_state(tp, 0);
5507         if (err)
5508                 goto out;
5509
5510         tg3_switch_clocks(tp);
5511
5512         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5513
5514         err = tg3_reset_hw(tp);
5515
5516 out:
5517         return err;
5518 }
5519
5520 #define TG3_STAT_ADD32(PSTAT, REG) \
5521 do {    u32 __val = tr32(REG); \
5522         (PSTAT)->low += __val; \
5523         if ((PSTAT)->low < __val) \
5524                 (PSTAT)->high += 1; \
5525 } while (0)
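 /* TG3_STAT_ADD32 accumulates a 32-bit hardware statistics register into a
  * 64-bit {high,low} software counter: if the low word ends up smaller than
  * the value just added, the 32-bit addition wrapped, so carry into the
  * high word.
  */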
5526
5527 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5528 {
5529         struct tg3_hw_stats *sp = tp->hw_stats;
5530
5531         if (!netif_carrier_ok(tp->dev))
5532                 return;
5533
5534         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5535         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5536         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5537         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5538         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5539         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5540         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5541         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5542         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5543         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5544         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5545         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5546         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5547
5548         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5549         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5550         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5551         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5552         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5553         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5554         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5555         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5556         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5557         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5558         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5559         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5560         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5561         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5562 }
5563
5564 static void tg3_timer(unsigned long __opaque)
5565 {
5566         struct tg3 *tp = (struct tg3 *) __opaque;
5567         unsigned long flags;
5568
5569         spin_lock_irqsave(&tp->lock, flags);
5570         spin_lock(&tp->tx_lock);
5571
5572         /* All of this garbage is needed because, when using non-tagged
5573          * IRQ status, the mailbox/status_block protocol the chip
5574          * uses with the CPU is race prone.
5575          */
5576         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5577                 tw32(GRC_LOCAL_CTRL,
5578                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5579         } else {
5580                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5581                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5582         }
5583
5584         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5585                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5586                 spin_unlock(&tp->tx_lock);
5587                 spin_unlock_irqrestore(&tp->lock, flags);
5588                 schedule_work(&tp->reset_task);
5589                 return;
5590         }
5591
5592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5594                 tg3_periodic_fetch_stats(tp);
5595
5596         /* This part only runs once per second. */
5597         if (!--tp->timer_counter) {
5598                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5599                         u32 mac_stat;
5600                         int phy_event;
5601
5602                         mac_stat = tr32(MAC_STATUS);
5603
5604                         phy_event = 0;
5605                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5606                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5607                                         phy_event = 1;
5608                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5609                                 phy_event = 1;
5610
5611                         if (phy_event)
5612                                 tg3_setup_phy(tp, 0);
5613                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5614                         u32 mac_stat = tr32(MAC_STATUS);
5615                         int need_setup = 0;
5616
5617                         if (netif_carrier_ok(tp->dev) &&
5618                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5619                                 need_setup = 1;
5620                         }
5621                         if (! netif_carrier_ok(tp->dev) &&
5622                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5623                                          MAC_STATUS_SIGNAL_DET))) {
5624                                 need_setup = 1;
5625                         }
5626                         if (need_setup) {
5627                                 tw32_f(MAC_MODE,
5628                                      (tp->mac_mode &
5629                                       ~MAC_MODE_PORT_MODE_MASK));
5630                                 udelay(40);
5631                                 tw32_f(MAC_MODE, tp->mac_mode);
5632                                 udelay(40);
5633                                 tg3_setup_phy(tp, 0);
5634                         }
5635                 }
5636
5637                 tp->timer_counter = tp->timer_multiplier;
5638         }
5639
5640         /* Heartbeat is only sent once every 120 seconds.  */
5641         if (!--tp->asf_counter) {
5642                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5643                         u32 val;
5644
5645                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5646                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5647                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5648                         val = tr32(GRC_RX_CPU_EVENT);
5649                         val |= (1 << 14);
5650                         tw32(GRC_RX_CPU_EVENT, val);
5651                 }
5652                 tp->asf_counter = tp->asf_multiplier;
5653         }
5654
5655         spin_unlock(&tp->tx_lock);
5656         spin_unlock_irqrestore(&tp->lock, flags);
5657
5658         tp->timer.expires = jiffies + tp->timer_offset;
5659         add_timer(&tp->timer);
5660 }
5661
5662 static int tg3_open(struct net_device *dev)
5663 {
5664         struct tg3 *tp = netdev_priv(dev);
5665         int err;
5666
5667         spin_lock_irq(&tp->lock);
5668         spin_lock(&tp->tx_lock);
5669
5670         tg3_disable_ints(tp);
5671         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5672
5673         spin_unlock(&tp->tx_lock);
5674         spin_unlock_irq(&tp->lock);
5675
5676         /* The placement of this call is tied
5677          * to the setup and use of Host TX descriptors.
5678          */
5679         err = tg3_alloc_consistent(tp);
5680         if (err)
5681                 return err;
5682
5683         err = request_irq(dev->irq, tg3_interrupt,
5684                           SA_SHIRQ, dev->name, dev);
5685
5686         if (err) {
5687                 tg3_free_consistent(tp);
5688                 return err;
5689         }
5690
5691         spin_lock_irq(&tp->lock);
5692         spin_lock(&tp->tx_lock);
5693
5694         err = tg3_init_hw(tp);
5695         if (err) {
5696                 tg3_halt(tp);
5697                 tg3_free_rings(tp);
5698         } else {
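                     /* The timer fires every HZ/10 jiffies (ten times a second):
                      * a timer_counter of 10 yields the once-per-second work in
                      * tg3_timer(), and an asf_counter of 10 * 120 yields the
                      * 120-second ASF heartbeat.
                      */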
5699                 tp->timer_offset = HZ / 10;
5700                 tp->timer_counter = tp->timer_multiplier = 10;
5701                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5702
5703                 init_timer(&tp->timer);
5704                 tp->timer.expires = jiffies + tp->timer_offset;
5705                 tp->timer.data = (unsigned long) tp;
5706                 tp->timer.function = tg3_timer;
5707                 add_timer(&tp->timer);
5708
5709                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5710         }
5711
5712         spin_unlock(&tp->tx_lock);
5713         spin_unlock_irq(&tp->lock);
5714
5715         if (err) {
5716                 free_irq(dev->irq, dev);
5717                 tg3_free_consistent(tp);
5718                 return err;
5719         }
5720
5721         spin_lock_irq(&tp->lock);
5722         spin_lock(&tp->tx_lock);
5723
5724         tg3_enable_ints(tp);
5725
5726         spin_unlock(&tp->tx_lock);
5727         spin_unlock_irq(&tp->lock);
5728
5729         netif_start_queue(dev);
5730
5731         return 0;
5732 }
5733
5734 #if 0
5735 /*static*/ void tg3_dump_state(struct tg3 *tp)
5736 {
5737         u32 val32, val32_2, val32_3, val32_4, val32_5;
5738         u16 val16;
5739         int i;
5740
5741         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5742         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5743         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5744                val16, val32);
5745
5746         /* MAC block */
5747         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5748                tr32(MAC_MODE), tr32(MAC_STATUS));
5749         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5750                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5751         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5752                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5753         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5754                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5755
5756         /* Send data initiator control block */
5757         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5758                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5759         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5760                tr32(SNDDATAI_STATSCTRL));
5761
5762         /* Send data completion control block */
5763         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5764
5765         /* Send BD ring selector block */
5766         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5767                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5768
5769         /* Send BD initiator control block */
5770         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5771                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5772
5773         /* Send BD completion control block */
5774         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5775
5776         /* Receive list placement control block */
5777         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5778                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5779         printk("       RCVLPC_STATSCTRL[%08x]\n",
5780                tr32(RCVLPC_STATSCTRL));
5781
5782         /* Receive data and receive BD initiator control block */
5783         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5784                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5785
5786         /* Receive data completion control block */
5787         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5788                tr32(RCVDCC_MODE));
5789
5790         /* Receive BD initiator control block */
5791         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5792                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5793
5794         /* Receive BD completion control block */
5795         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5796                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5797
5798         /* Receive list selector control block */
5799         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5800                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5801
5802         /* Mbuf cluster free block */
5803         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5804                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5805
5806         /* Host coalescing control block */
5807         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5808                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5809         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5810                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5811                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5812         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5813                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5814                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5815         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5816                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5817         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5818                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5819
5820         /* Memory arbiter control block */
5821         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5822                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5823
5824         /* Buffer manager control block */
5825         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5826                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5827         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5828                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5829         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5830                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5831                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5832                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5833
5834         /* Read DMA control block */
5835         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5836                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5837
5838         /* Write DMA control block */
5839         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5840                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5841
5842         /* DMA completion block */
5843         printk("DEBUG: DMAC_MODE[%08x]\n",
5844                tr32(DMAC_MODE));
5845
5846         /* GRC block */
5847         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5848                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5849         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5850                tr32(GRC_LOCAL_CTRL));
5851
5852         /* TG3_BDINFOs */
5853         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5854                tr32(RCVDBDI_JUMBO_BD + 0x0),
5855                tr32(RCVDBDI_JUMBO_BD + 0x4),
5856                tr32(RCVDBDI_JUMBO_BD + 0x8),
5857                tr32(RCVDBDI_JUMBO_BD + 0xc));
5858         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5859                tr32(RCVDBDI_STD_BD + 0x0),
5860                tr32(RCVDBDI_STD_BD + 0x4),
5861                tr32(RCVDBDI_STD_BD + 0x8),
5862                tr32(RCVDBDI_STD_BD + 0xc));
5863         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5864                tr32(RCVDBDI_MINI_BD + 0x0),
5865                tr32(RCVDBDI_MINI_BD + 0x4),
5866                tr32(RCVDBDI_MINI_BD + 0x8),
5867                tr32(RCVDBDI_MINI_BD + 0xc));
5868
5869         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5870         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5871         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5872         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5873         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5874                val32, val32_2, val32_3, val32_4);
5875
5876         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5877         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5878         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5879         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5880         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5881                val32, val32_2, val32_3, val32_4);
5882
5883         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5884         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5885         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5886         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5887         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5888         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5889                val32, val32_2, val32_3, val32_4, val32_5);
5890
5891         /* SW status block */
5892         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5893                tp->hw_status->status,
5894                tp->hw_status->status_tag,
5895                tp->hw_status->rx_jumbo_consumer,
5896                tp->hw_status->rx_consumer,
5897                tp->hw_status->rx_mini_consumer,
5898                tp->hw_status->idx[0].rx_producer,
5899                tp->hw_status->idx[0].tx_consumer);
5900
5901         /* SW statistics block */
5902         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5903                ((u32 *)tp->hw_stats)[0],
5904                ((u32 *)tp->hw_stats)[1],
5905                ((u32 *)tp->hw_stats)[2],
5906                ((u32 *)tp->hw_stats)[3]);
5907
5908         /* Mailboxes */
5909         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5910                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5911                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5912                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5913                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5914
5915         /* NIC side send descriptors. */
5916         for (i = 0; i < 6; i++) {
5917                 unsigned long txd;
5918
5919                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5920                         + (i * sizeof(struct tg3_tx_buffer_desc));
5921                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5922                        i,
5923                        readl(txd + 0x0), readl(txd + 0x4),
5924                        readl(txd + 0x8), readl(txd + 0xc));
5925         }
5926
5927         /* NIC side RX descriptors. */
5928         for (i = 0; i < 6; i++) {
5929                 unsigned long rxd;
5930
5931                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5932                         + (i * sizeof(struct tg3_rx_buffer_desc));
5933                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5934                        i,
5935                        readl(rxd + 0x0), readl(rxd + 0x4),
5936                        readl(rxd + 0x8), readl(rxd + 0xc));
5937                 rxd += (4 * sizeof(u32));
5938                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5939                        i,
5940                        readl(rxd + 0x0), readl(rxd + 0x4),
5941                        readl(rxd + 0x8), readl(rxd + 0xc));
5942         }
5943
5944         for (i = 0; i < 6; i++) {
5945                 unsigned long rxd;
5946
5947                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5948                         + (i * sizeof(struct tg3_rx_buffer_desc));
5949                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5950                        i,
5951                        readl(rxd + 0x0), readl(rxd + 0x4),
5952                        readl(rxd + 0x8), readl(rxd + 0xc));
5953                 rxd += (4 * sizeof(u32));
5954                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5955                        i,
5956                        readl(rxd + 0x0), readl(rxd + 0x4),
5957                        readl(rxd + 0x8), readl(rxd + 0xc));
5958         }
5959 }
5960 #endif
5961
5962 static struct net_device_stats *tg3_get_stats(struct net_device *);
5963 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5964
5965 static int tg3_close(struct net_device *dev)
5966 {
5967         struct tg3 *tp = netdev_priv(dev);
5968
5969         netif_stop_queue(dev);
5970
5971         del_timer_sync(&tp->timer);
5972
5973         spin_lock_irq(&tp->lock);
5974         spin_lock(&tp->tx_lock);
5975 #if 0
5976         tg3_dump_state(tp);
5977 #endif
5978
5979         tg3_disable_ints(tp);
5980
5981         tg3_halt(tp);
5982         tg3_free_rings(tp);
5983         tp->tg3_flags &=
5984                 ~(TG3_FLAG_INIT_COMPLETE |
5985                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5986         netif_carrier_off(tp->dev);
5987
5988         spin_unlock(&tp->tx_lock);
5989         spin_unlock_irq(&tp->lock);
5990
5991         free_irq(dev->irq, dev);
5992
5993         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5994                sizeof(tp->net_stats_prev));
5995         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5996                sizeof(tp->estats_prev));
5997
5998         tg3_free_consistent(tp);
5999
6000         return 0;
6001 }
6002
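 /* Hardware statistics are kept as {high,low} 32-bit pairs.  On 64-bit hosts
  * get_stat64() returns the full value; on 32-bit hosts only the low word
  * fits in the unsigned long used by struct net_device_stats.
  */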
6003 static inline unsigned long get_stat64(tg3_stat64_t *val)
6004 {
6005         unsigned long ret;
6006
6007 #if (BITS_PER_LONG == 32)
6008         ret = val->low;
6009 #else
6010         ret = ((u64)val->high << 32) | ((u64)val->low);
6011 #endif
6012         return ret;
6013 }
6014
6015 static unsigned long calc_crc_errors(struct tg3 *tp)
6016 {
6017         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6018
6019         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6020             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6021              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6022                 unsigned long flags;
6023                 u32 val;
6024
6025                 spin_lock_irqsave(&tp->lock, flags);
6026                 tg3_readphy(tp, 0x1e, &val);
6027                 tg3_writephy(tp, 0x1e, val | 0x8000);
6028                 tg3_readphy(tp, 0x14, &val);
6029                 spin_unlock_irqrestore(&tp->lock, flags);
6030
6031                 tp->phy_crc_errors += val;
6032
6033                 return tp->phy_crc_errors;
6034         }
6035
6036         return get_stat64(&hw_stats->rx_fcs_errors);
6037 }
6038
6039 #define ESTAT_ADD(member) \
6040         estats->member =        old_estats->member + \
6041                                 get_stat64(&hw_stats->member)
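 /* Each ethtool statistic is reported as the total saved at the last close
  * (tp->estats_prev) plus whatever the hardware statistics block currently
  * holds, so the counters survive an ifdown/ifup cycle.
  */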
6042
6043 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6044 {
6045         struct tg3_ethtool_stats *estats = &tp->estats;
6046         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6047         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6048
6049         if (!hw_stats)
6050                 return old_estats;
6051
6052         ESTAT_ADD(rx_octets);
6053         ESTAT_ADD(rx_fragments);
6054         ESTAT_ADD(rx_ucast_packets);
6055         ESTAT_ADD(rx_mcast_packets);
6056         ESTAT_ADD(rx_bcast_packets);
6057         ESTAT_ADD(rx_fcs_errors);
6058         ESTAT_ADD(rx_align_errors);
6059         ESTAT_ADD(rx_xon_pause_rcvd);
6060         ESTAT_ADD(rx_xoff_pause_rcvd);
6061         ESTAT_ADD(rx_mac_ctrl_rcvd);
6062         ESTAT_ADD(rx_xoff_entered);
6063         ESTAT_ADD(rx_frame_too_long_errors);
6064         ESTAT_ADD(rx_jabbers);
6065         ESTAT_ADD(rx_undersize_packets);
6066         ESTAT_ADD(rx_in_length_errors);
6067         ESTAT_ADD(rx_out_length_errors);
6068         ESTAT_ADD(rx_64_or_less_octet_packets);
6069         ESTAT_ADD(rx_65_to_127_octet_packets);
6070         ESTAT_ADD(rx_128_to_255_octet_packets);
6071         ESTAT_ADD(rx_256_to_511_octet_packets);
6072         ESTAT_ADD(rx_512_to_1023_octet_packets);
6073         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6074         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6075         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6076         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6077         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6078
6079         ESTAT_ADD(tx_octets);
6080         ESTAT_ADD(tx_collisions);
6081         ESTAT_ADD(tx_xon_sent);
6082         ESTAT_ADD(tx_xoff_sent);
6083         ESTAT_ADD(tx_flow_control);
6084         ESTAT_ADD(tx_mac_errors);
6085         ESTAT_ADD(tx_single_collisions);
6086         ESTAT_ADD(tx_mult_collisions);
6087         ESTAT_ADD(tx_deferred);
6088         ESTAT_ADD(tx_excessive_collisions);
6089         ESTAT_ADD(tx_late_collisions);
6090         ESTAT_ADD(tx_collide_2times);
6091         ESTAT_ADD(tx_collide_3times);
6092         ESTAT_ADD(tx_collide_4times);
6093         ESTAT_ADD(tx_collide_5times);
6094         ESTAT_ADD(tx_collide_6times);
6095         ESTAT_ADD(tx_collide_7times);
6096         ESTAT_ADD(tx_collide_8times);
6097         ESTAT_ADD(tx_collide_9times);
6098         ESTAT_ADD(tx_collide_10times);
6099         ESTAT_ADD(tx_collide_11times);
6100         ESTAT_ADD(tx_collide_12times);
6101         ESTAT_ADD(tx_collide_13times);
6102         ESTAT_ADD(tx_collide_14times);
6103         ESTAT_ADD(tx_collide_15times);
6104         ESTAT_ADD(tx_ucast_packets);
6105         ESTAT_ADD(tx_mcast_packets);
6106         ESTAT_ADD(tx_bcast_packets);
6107         ESTAT_ADD(tx_carrier_sense_errors);
6108         ESTAT_ADD(tx_discards);
6109         ESTAT_ADD(tx_errors);
6110
6111         ESTAT_ADD(dma_writeq_full);
6112         ESTAT_ADD(dma_write_prioq_full);
6113         ESTAT_ADD(rxbds_empty);
6114         ESTAT_ADD(rx_discards);
6115         ESTAT_ADD(rx_errors);
6116         ESTAT_ADD(rx_threshold_hit);
6117
6118         ESTAT_ADD(dma_readq_full);
6119         ESTAT_ADD(dma_read_prioq_full);
6120         ESTAT_ADD(tx_comp_queue_full);
6121
6122         ESTAT_ADD(ring_set_send_prod_index);
6123         ESTAT_ADD(ring_status_update);
6124         ESTAT_ADD(nic_irqs);
6125         ESTAT_ADD(nic_avoided_irqs);
6126         ESTAT_ADD(nic_tx_threshold_hit);
6127
6128         return estats;
6129 }
6130
6131 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6132 {
6133         struct tg3 *tp = netdev_priv(dev);
6134         struct net_device_stats *stats = &tp->net_stats;
6135         struct net_device_stats *old_stats = &tp->net_stats_prev;
6136         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6137
6138         if (!hw_stats)
6139                 return old_stats;
6140
6141         stats->rx_packets = old_stats->rx_packets +
6142                 get_stat64(&hw_stats->rx_ucast_packets) +
6143                 get_stat64(&hw_stats->rx_mcast_packets) +
6144                 get_stat64(&hw_stats->rx_bcast_packets);
6145
6146         stats->tx_packets = old_stats->tx_packets +
6147                 get_stat64(&hw_stats->tx_ucast_packets) +
6148                 get_stat64(&hw_stats->tx_mcast_packets) +
6149                 get_stat64(&hw_stats->tx_bcast_packets);
6150
6151         stats->rx_bytes = old_stats->rx_bytes +
6152                 get_stat64(&hw_stats->rx_octets);
6153         stats->tx_bytes = old_stats->tx_bytes +
6154                 get_stat64(&hw_stats->tx_octets);
6155
6156         stats->rx_errors = old_stats->rx_errors +
6157                 get_stat64(&hw_stats->rx_errors) +
6158                 get_stat64(&hw_stats->rx_discards);
6159         stats->tx_errors = old_stats->tx_errors +
6160                 get_stat64(&hw_stats->tx_errors) +
6161                 get_stat64(&hw_stats->tx_mac_errors) +
6162                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6163                 get_stat64(&hw_stats->tx_discards);
6164
6165         stats->multicast = old_stats->multicast +
6166                 get_stat64(&hw_stats->rx_mcast_packets);
6167         stats->collisions = old_stats->collisions +
6168                 get_stat64(&hw_stats->tx_collisions);
6169
6170         stats->rx_length_errors = old_stats->rx_length_errors +
6171                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6172                 get_stat64(&hw_stats->rx_undersize_packets);
6173
6174         stats->rx_over_errors = old_stats->rx_over_errors +
6175                 get_stat64(&hw_stats->rxbds_empty);
6176         stats->rx_frame_errors = old_stats->rx_frame_errors +
6177                 get_stat64(&hw_stats->rx_align_errors);
6178         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6179                 get_stat64(&hw_stats->tx_discards);
6180         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6181                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6182
6183         stats->rx_crc_errors = old_stats->rx_crc_errors +
6184                 calc_crc_errors(tp);
6185
6186         return stats;
6187 }
6188
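 /* Bit-serial, reflected CRC-32 (Ethernet polynomial 0xedb88320) with the
  * usual 0xffffffff preset and final inversion.  __tg3_set_rx_mode() below
  * uses the low seven bits of ~calc_crc(addr) to pick one of the 128
  * multicast hash-filter bits.
  */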
6189 static inline u32 calc_crc(unsigned char *buf, int len)
6190 {
6191         u32 reg;
6192         u32 tmp;
6193         int j, k;
6194
6195         reg = 0xffffffff;
6196
6197         for (j = 0; j < len; j++) {
6198                 reg ^= buf[j];
6199
6200                 for (k = 0; k < 8; k++) {
6201                         tmp = reg & 0x01;
6202
6203                         reg >>= 1;
6204
6205                         if (tmp) {
6206                                 reg ^= 0xedb88320;
6207                         }
6208                 }
6209         }
6210
6211         return ~reg;
6212 }
6213
6214 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6215 {
6216         /* accept or reject all multicast frames */
6217         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6218         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6219         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6220         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6221 }
6222
6223 static void __tg3_set_rx_mode(struct net_device *dev)
6224 {
6225         struct tg3 *tp = netdev_priv(dev);
6226         u32 rx_mode;
6227
6228         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6229                                   RX_MODE_KEEP_VLAN_TAG);
6230
6231         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6232          * flag clear.
6233          */
6234 #if TG3_VLAN_TAG_USED
6235         if (!tp->vlgrp &&
6236             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6237                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6238 #else
6239         /* By definition, VLAN is always disabled in this
6240          * case.
6241          */
6242         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6243                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6244 #endif
6245
6246         if (dev->flags & IFF_PROMISC) {
6247                 /* Promiscuous mode. */
6248                 rx_mode |= RX_MODE_PROMISC;
6249         } else if (dev->flags & IFF_ALLMULTI) {
6250                 /* Accept all multicast. */
6251                 tg3_set_multi (tp, 1);
6252         } else if (dev->mc_count < 1) {
6253                 /* Reject all multicast. */
6254                 tg3_set_multi (tp, 0);
6255         } else {
6256                 /* Accept one or more multicast(s). */
6257                 struct dev_mc_list *mclist;
6258                 unsigned int i;
6259                 u32 mc_filter[4] = { 0, };
6260                 u32 regidx;
6261                 u32 bit;
6262                 u32 crc;
6263
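                     /* Hash each multicast address into one of 128 filter bits:
                      * bits 6:5 of ~crc select one of the four 32-bit MAC_HASH
                      * registers and bits 4:0 select the bit within it.
                      */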
6264                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6265                      i++, mclist = mclist->next) {
6266
6267                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6268                         bit = ~crc & 0x7f;
6269                         regidx = (bit & 0x60) >> 5;
6270                         bit &= 0x1f;
6271                         mc_filter[regidx] |= (1 << bit);
6272                 }
6273
6274                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6275                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6276                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6277                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6278         }
6279
6280         if (rx_mode != tp->rx_mode) {
6281                 tp->rx_mode = rx_mode;
6282                 tw32_f(MAC_RX_MODE, rx_mode);
6283                 udelay(10);
6284         }
6285 }
6286
6287 static void tg3_set_rx_mode(struct net_device *dev)
6288 {
6289         struct tg3 *tp = netdev_priv(dev);
6290
6291         spin_lock_irq(&tp->lock);
6292         spin_lock(&tp->tx_lock);
6293         __tg3_set_rx_mode(dev);
6294         spin_unlock(&tp->tx_lock);
6295         spin_unlock_irq(&tp->lock);
6296 }
6297
6298 #define TG3_REGDUMP_LEN         (32 * 1024)
6299
6300 static int tg3_get_regs_len(struct net_device *dev)
6301 {
6302         return TG3_REGDUMP_LEN;
6303 }
6304
6305 static void tg3_get_regs(struct net_device *dev,
6306                 struct ethtool_regs *regs, void *_p)
6307 {
6308         u32 *p = _p;
6309         struct tg3 *tp = netdev_priv(dev);
6310         u8 *orig_p = _p;
6311         int i;
6312
6313         regs->version = 0;
6314
6315         memset(p, 0, TG3_REGDUMP_LEN);
6316
6317         spin_lock_irq(&tp->lock);
6318         spin_lock(&tp->tx_lock);
6319
6320 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6321 #define GET_REG32_LOOP(base,len)                \
6322 do {    p = (u32 *)(orig_p + (base));           \
6323         for (i = 0; i < len; i += 4)            \
6324                 __GET_REG32((base) + i);        \
6325 } while (0)
6326 #define GET_REG32_1(reg)                        \
6327 do {    p = (u32 *)(orig_p + (reg));            \
6328         __GET_REG32((reg));                     \
6329 } while (0)
6330
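        /* Copy each register block into the dump buffer at the same offset
         * as its register address, so the dump mirrors the chip's register
         * map.
         */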
6331         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6332         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6333         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6334         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6335         GET_REG32_1(SNDDATAC_MODE);
6336         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6337         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6338         GET_REG32_1(SNDBDC_MODE);
6339         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6340         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6341         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6342         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6343         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6344         GET_REG32_1(RCVDCC_MODE);
6345         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6346         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6347         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6348         GET_REG32_1(MBFREE_MODE);
6349         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6350         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6351         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6352         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6353         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6354         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6355         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6356         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6357         GET_REG32_LOOP(FTQ_RESET, 0x120);
6358         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6359         GET_REG32_1(DMAC_MODE);
6360         GET_REG32_LOOP(GRC_MODE, 0x4c);
6361         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6362                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6363
6364 #undef __GET_REG32
6365 #undef GET_REG32_LOOP
6366 #undef GET_REG32_1
6367
6368         spin_unlock(&tp->tx_lock);
6369         spin_unlock_irq(&tp->lock);
6370 }
6371
6372 static int tg3_get_eeprom_len(struct net_device *dev)
6373 {
6374         return EEPROM_CHIP_SIZE;
6375 }
6376
6377 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
6378                                         u32 offset, u32 *val);
6379 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6380 {
6381         struct tg3 *tp = netdev_priv(dev);
6382         int ret;
6383         u8  *pd;
6384         u32 i, offset, len, val, b_offset, b_count;
6385
6386         offset = eeprom->offset;
6387         len = eeprom->len;
6388         eeprom->len = 0;
6389
6390         ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6391         if (ret)
6392                 return ret;
6393         eeprom->magic = swab32(eeprom->magic);
6394
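        /* NVRAM is read 32 bits at a time, so split the request into an
         * unaligned head, a run of whole words, and an unaligned tail.
         */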
6395         if (offset & 3) {
6396                 /* adjustments to start on required 4 byte boundary */
6397                 b_offset = offset & 3;
6398                 b_count = 4 - b_offset;
6399                 if (b_count > len) {
6400                         /* i.e. offset=1 len=2 */
6401                         b_count = len;
6402                 }
6403                 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6404                 if (ret)
6405                         return ret;
6406                 memcpy(data, ((char*)&val) + b_offset, b_count);
6407                 len -= b_count;
6408                 offset += b_count;
6409                 eeprom->len += b_count;
6410         }
6411
6412         /* read bytes up to the last 4 byte boundary */
6413         pd = &data[eeprom->len];
6414         for (i = 0; i < (len - (len & 3)); i += 4) {
6415                 ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
6416                                 (u32*)(pd + i));
6417                 if (ret) {
6418                         eeprom->len += i;
6419                         return ret;
6420                 }
6421         }
6422         eeprom->len += i;
6423
6424         if (len & 3) {
6425                 /* read last bytes not ending on 4 byte boundary */
6426                 pd = &data[eeprom->len];
6427                 b_count = len & 3;
6428                 b_offset = offset + len - b_count;
6429                 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6430                 if (ret)
6431                         return ret;
6432                 memcpy(pd, ((char*)&val), b_count);
6433                 eeprom->len += b_count;
6434         }
6435         return 0;
6436 }
6437
6438 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6439 {
6440         struct tg3 *tp = netdev_priv(dev);
6441   
6442         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6443                                         tp->link_config.phy_is_low_power)
6444                 return -EAGAIN;
6445
6446         cmd->supported = (SUPPORTED_Autoneg);
6447
6448         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6449                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6450                                    SUPPORTED_1000baseT_Full);
6451
6452         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6453                 cmd->supported |= (SUPPORTED_100baseT_Half |
6454                                   SUPPORTED_100baseT_Full |
6455                                   SUPPORTED_10baseT_Half |
6456                                   SUPPORTED_10baseT_Full |
6457                                   SUPPORTED_MII);
6458         else
6459                 cmd->supported |= SUPPORTED_FIBRE;
6460   
6461         cmd->advertising = tp->link_config.advertising;
6462         cmd->speed = tp->link_config.active_speed;
6463         cmd->duplex = tp->link_config.active_duplex;
6464         cmd->port = 0;
6465         cmd->phy_address = PHY_ADDR;
6466         cmd->transceiver = 0;
6467         cmd->autoneg = tp->link_config.autoneg;
6468         cmd->maxtxpkt = 0;
6469         cmd->maxrxpkt = 0;
6470         return 0;
6471 }
6472   
6473 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6474 {
6475         struct tg3 *tp = netdev_priv(dev);
6476   
6477         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6478             tp->link_config.phy_is_low_power)
6479                 return -EAGAIN;
6480
6481         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6482                 /* These are the only valid advertisement bits allowed.  */
6483                 if (cmd->autoneg == AUTONEG_ENABLE &&
6484                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6485                                           ADVERTISED_1000baseT_Full |
6486                                           ADVERTISED_Autoneg |
6487                                           ADVERTISED_FIBRE)))
6488                         return -EINVAL;
6489         }
6490
6491         spin_lock_irq(&tp->lock);
6492         spin_lock(&tp->tx_lock);
6493
6494         tp->link_config.autoneg = cmd->autoneg;
6495         if (cmd->autoneg == AUTONEG_ENABLE) {
6496                 tp->link_config.advertising = cmd->advertising;
6497                 tp->link_config.speed = SPEED_INVALID;
6498                 tp->link_config.duplex = DUPLEX_INVALID;
6499         } else {
6500                 tp->link_config.advertising = 0;
6501                 tp->link_config.speed = cmd->speed;
6502                 tp->link_config.duplex = cmd->duplex;
6503         }
6504   
6505         tg3_setup_phy(tp, 1);
6506         spin_unlock(&tp->tx_lock);
6507         spin_unlock_irq(&tp->lock);
6508   
6509         return 0;
6510 }
6511   
6512 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6513 {
6514         struct tg3 *tp = netdev_priv(dev);
6515   
6516         strcpy(info->driver, DRV_MODULE_NAME);
6517         strcpy(info->version, DRV_MODULE_VERSION);
6518         strcpy(info->bus_info, pci_name(tp->pdev));
6519 }
6520   
6521 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6522 {
6523         struct tg3 *tp = netdev_priv(dev);
6524   
6525         wol->supported = WAKE_MAGIC;
6526         wol->wolopts = 0;
6527         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6528                 wol->wolopts = WAKE_MAGIC;
6529         memset(&wol->sopass, 0, sizeof(wol->sopass));
6530 }
6531   
6532 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6533 {
6534         struct tg3 *tp = netdev_priv(dev);
6535   
6536         if (wol->wolopts & ~WAKE_MAGIC)
6537                 return -EINVAL;
6538         if ((wol->wolopts & WAKE_MAGIC) &&
6539             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6540             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6541                 return -EINVAL;
6542   
6543         spin_lock_irq(&tp->lock);
6544         if (wol->wolopts & WAKE_MAGIC)
6545                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6546         else
6547                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6548         spin_unlock_irq(&tp->lock);
6549   
6550         return 0;
6551 }
6552   
6553 static u32 tg3_get_msglevel(struct net_device *dev)
6554 {
6555         struct tg3 *tp = netdev_priv(dev);
6556         return tp->msg_enable;
6557 }
6558   
6559 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6560 {
6561         struct tg3 *tp = netdev_priv(dev);
6562         tp->msg_enable = value;
6563 }
6564   
6565 #if TG3_TSO_SUPPORT != 0
6566 static int tg3_set_tso(struct net_device *dev, u32 value)
6567 {
6568         struct tg3 *tp = netdev_priv(dev);
6569
6570         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6571                 if (value)
6572                         return -EINVAL;
6573                 return 0;
6574         }
6575         return ethtool_op_set_tso(dev, value);
6576 }
6577 #endif
6578   
6579 static int tg3_nway_reset(struct net_device *dev)
6580 {
6581         struct tg3 *tp = netdev_priv(dev);
6582         u32 bmcr;
6583         int r;
6584   
6585         spin_lock_irq(&tp->lock);
6586         tg3_readphy(tp, MII_BMCR, &bmcr);
6587         tg3_readphy(tp, MII_BMCR, &bmcr);
6588         r = -EINVAL;
6589         if (bmcr & BMCR_ANENABLE) {
6590                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6591                 r = 0;
6592         }
6593         spin_unlock_irq(&tp->lock);
6594   
6595         return r;
6596 }
6597   
6598 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6599 {
6600         struct tg3 *tp = netdev_priv(dev);
6601   
6602         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6603         ering->rx_mini_max_pending = 0;
6604         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6605
6606         ering->rx_pending = tp->rx_pending;
6607         ering->rx_mini_pending = 0;
6608         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6609         ering->tx_pending = tp->tx_pending;
6610 }
6611   
6612 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6613 {
6614         struct tg3 *tp = netdev_priv(dev);
6615   
6616         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6617             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6618             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6619                 return -EINVAL;
6620   
6621         tg3_netif_stop(tp);
6622         spin_lock_irq(&tp->lock);
6623         spin_lock(&tp->tx_lock);
6624   
6625         tp->rx_pending = ering->rx_pending;
6626
6627         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6628             tp->rx_pending > 63)
6629                 tp->rx_pending = 63;
6630         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6631         tp->tx_pending = ering->tx_pending;
6632
6633         tg3_halt(tp);
6634         tg3_init_hw(tp);
6635         tg3_netif_start(tp);
6636         spin_unlock(&tp->tx_lock);
6637         spin_unlock_irq(&tp->lock);
6638   
6639         return 0;
6640 }
6641   
6642 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6643 {
6644         struct tg3 *tp = netdev_priv(dev);
6645   
6646         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6647         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6648         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6649 }
6650   
6651 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6652 {
6653         struct tg3 *tp = netdev_priv(dev);
6654   
6655         tg3_netif_stop(tp);
6656         spin_lock_irq(&tp->lock);
6657         spin_lock(&tp->tx_lock);
6658         if (epause->autoneg)
6659                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6660         else
6661                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6662         if (epause->rx_pause)
6663                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6664         else
6665                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6666         if (epause->tx_pause)
6667                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6668         else
6669                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6670         tg3_halt(tp);
6671         tg3_init_hw(tp);
6672         tg3_netif_start(tp);
6673         spin_unlock(&tp->tx_lock);
6674         spin_unlock_irq(&tp->lock);
6675   
6676         return 0;
6677 }
6678   
6679 static u32 tg3_get_rx_csum(struct net_device *dev)
6680 {
6681         struct tg3 *tp = netdev_priv(dev);
6682         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6683 }
6684   
6685 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6686 {
6687         struct tg3 *tp = netdev_priv(dev);
6688   
6689         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6690                 if (data != 0)
6691                         return -EINVAL;
6692                 return 0;
6693         }
6694   
6695         spin_lock_irq(&tp->lock);
6696         if (data)
6697                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6698         else
6699                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6700         spin_unlock_irq(&tp->lock);
6701   
6702         return 0;
6703 }
6704   
6705 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6706 {
6707         struct tg3 *tp = netdev_priv(dev);
6708   
6709         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6710                 if (data != 0)
6711                         return -EINVAL;
6712                 return 0;
6713         }
6714   
6715         if (data)
6716                 dev->features |= NETIF_F_IP_CSUM;
6717         else
6718                 dev->features &= ~NETIF_F_IP_CSUM;
6719
6720         return 0;
6721 }
6722
6723 static int tg3_get_stats_count (struct net_device *dev)
6724 {
6725         return TG3_NUM_STATS;
6726 }
6727
6728 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6729 {
6730         switch (stringset) {
6731         case ETH_SS_STATS:
6732                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6733                 break;
6734         default:
6735                 WARN_ON(1);     /* we need a WARN() */
6736                 break;
6737         }
6738 }
6739
6740 static void tg3_get_ethtool_stats (struct net_device *dev,
6741                                    struct ethtool_stats *estats, u64 *tmp_stats)
6742 {
6743         struct tg3 *tp = netdev_priv(dev);
6744         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6745 }
6746
6747 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6748 {
6749         struct mii_ioctl_data *data = if_mii(ifr);
6750         struct tg3 *tp = netdev_priv(dev);
6751         int err;
6752
6753         switch(cmd) {
6754         case SIOCGMIIPHY:
6755                 data->phy_id = PHY_ADDR;
6756
6757                 /* fallthru */
6758         case SIOCGMIIREG: {
6759                 u32 mii_regval;
6760
6761                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6762                         break;                  /* We have no PHY */
6763
6764                 spin_lock_irq(&tp->lock);
6765                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6766                 spin_unlock_irq(&tp->lock);
6767
6768                 data->val_out = mii_regval;
6769
6770                 return err;
6771         }
6772
6773         case SIOCSMIIREG:
6774                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6775                         break;                  /* We have no PHY */
6776
6777                 if (!capable(CAP_NET_ADMIN))
6778                         return -EPERM;
6779
6780                 spin_lock_irq(&tp->lock);
6781                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6782                 spin_unlock_irq(&tp->lock);
6783
6784                 return err;
6785
6786         default:
6787                 /* do nothing */
6788                 break;
6789         }
6790         return -EOPNOTSUPP;
6791 }
6792
6793 #if TG3_VLAN_TAG_USED
6794 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6795 {
6796         struct tg3 *tp = netdev_priv(dev);
6797
6798         spin_lock_irq(&tp->lock);
6799         spin_lock(&tp->tx_lock);
6800
6801         tp->vlgrp = grp;
6802
6803         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6804         __tg3_set_rx_mode(dev);
6805
6806         spin_unlock(&tp->tx_lock);
6807         spin_unlock_irq(&tp->lock);
6808 }
6809
6810 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6811 {
6812         struct tg3 *tp = netdev_priv(dev);
6813
6814         spin_lock_irq(&tp->lock);
6815         spin_lock(&tp->tx_lock);
6816         if (tp->vlgrp)
6817                 tp->vlgrp->vlan_devices[vid] = NULL;
6818         spin_unlock(&tp->tx_lock);
6819         spin_unlock_irq(&tp->lock);
6820 }
6821 #endif
6822
6823 static struct ethtool_ops tg3_ethtool_ops = {
6824         .get_settings           = tg3_get_settings,
6825         .set_settings           = tg3_set_settings,
6826         .get_drvinfo            = tg3_get_drvinfo,
6827         .get_regs_len           = tg3_get_regs_len,
6828         .get_regs               = tg3_get_regs,
6829         .get_wol                = tg3_get_wol,
6830         .set_wol                = tg3_set_wol,
6831         .get_msglevel           = tg3_get_msglevel,
6832         .set_msglevel           = tg3_set_msglevel,
6833         .nway_reset             = tg3_nway_reset,
6834         .get_link               = ethtool_op_get_link,
6835         .get_eeprom_len         = tg3_get_eeprom_len,
6836         .get_eeprom             = tg3_get_eeprom,
6837         .get_ringparam          = tg3_get_ringparam,
6838         .set_ringparam          = tg3_set_ringparam,
6839         .get_pauseparam         = tg3_get_pauseparam,
6840         .set_pauseparam         = tg3_set_pauseparam,
6841         .get_rx_csum            = tg3_get_rx_csum,
6842         .set_rx_csum            = tg3_set_rx_csum,
6843         .get_tx_csum            = ethtool_op_get_tx_csum,
6844         .set_tx_csum            = tg3_set_tx_csum,
6845         .get_sg                 = ethtool_op_get_sg,
6846         .set_sg                 = ethtool_op_set_sg,
6847 #if TG3_TSO_SUPPORT != 0
6848         .get_tso                = ethtool_op_get_tso,
6849         .set_tso                = tg3_set_tso,
6850 #endif
6851         .get_strings            = tg3_get_strings,
6852         .get_stats_count        = tg3_get_stats_count,
6853         .get_ethtool_stats      = tg3_get_ethtool_stats,
6854 };
6855
6856 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6857 static void __devinit tg3_nvram_init(struct tg3 *tp)
6858 {
6859         int j;
6860
6861         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
6862                 return;
6863
6864         tw32_f(GRC_EEPROM_ADDR,
6865              (EEPROM_ADDR_FSM_RESET |
6866               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6867                EEPROM_ADDR_CLKPERD_SHIFT)));
6868
6869         /* XXX schedule_timeout() ... */
6870         for (j = 0; j < 100; j++)
6871                 udelay(10);
6872
6873         /* Enable seeprom accesses. */
6874         tw32_f(GRC_LOCAL_CTRL,
6875              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6876         udelay(100);
6877
6878         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6879             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6880                 u32 nvcfg1;
6881
6882                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6883                         u32 nvaccess = tr32(NVRAM_ACCESS);
6884
6885                         tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6886                 }
6887
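                /* NVRAM_CFG1 tells us whether a flash interface is present
                 * and whether it runs in buffered mode.
                 */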
6888                 nvcfg1 = tr32(NVRAM_CFG1);
6889
6890                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6891                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6892                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6893                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6894                 } else {
6895                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6896                         tw32(NVRAM_CFG1, nvcfg1);
6897                 }
6898
6899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6900                         u32 nvaccess = tr32(NVRAM_ACCESS);
6901
6902                         tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6903                 }
6904         } else {
6905                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6906         }
6907 }
6908
6909 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
6910                                         u32 offset, u32 *val)
6911 {
6912         u32 tmp;
6913         int i;
6914
6915         if (offset > EEPROM_ADDR_ADDR_MASK ||
6916             (offset % 4) != 0)
6917                 return -EINVAL;
6918
6919         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6920                                         EEPROM_ADDR_DEVID_MASK |
6921                                         EEPROM_ADDR_READ);
6922         tw32(GRC_EEPROM_ADDR,
6923              tmp |
6924              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6925              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6926               EEPROM_ADDR_ADDR_MASK) |
6927              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6928
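        /* Poll for up to ~1 second (10000 x 100us) for the EEPROM state
         * machine to signal completion.
         */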
6929         for (i = 0; i < 10000; i++) {
6930                 tmp = tr32(GRC_EEPROM_ADDR);
6931
6932                 if (tmp & EEPROM_ADDR_COMPLETE)
6933                         break;
6934                 udelay(100);
6935         }
6936         if (!(tmp & EEPROM_ADDR_COMPLETE))
6937                 return -EBUSY;
6938
6939         *val = tr32(GRC_EEPROM_DATA);
6940         return 0;
6941 }
6942
6943 static int __devinit tg3_nvram_read(struct tg3 *tp,
6944                                     u32 offset, u32 *val)
6945 {
6946         int i;
6947
6948         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
6949                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
6950                 return -EINVAL;
6951         }
6952
6953         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6954                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6955
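        /* Buffered flash is page addressed: convert the linear offset into
         * a (page number, offset within page) pair.
         */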
6956         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6957                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6958                           NVRAM_BUFFERED_PAGE_POS) +
6959                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6960
6961         if (offset > NVRAM_ADDR_MSK)
6962                 return -EINVAL;
6963
6964         tg3_nvram_lock(tp);
6965
6966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6967                 u32 nvaccess = tr32(NVRAM_ACCESS);
6968
6969                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6970         }
6971
6972         tw32(NVRAM_ADDR, offset);
6973         tw32(NVRAM_CMD,
6974              NVRAM_CMD_RD | NVRAM_CMD_GO |
6975              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6976
6977         /* Wait for the done bit to be set. */
6978         for (i = 0; i < 1000; i++) {
6979                 udelay(10);
6980                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6981                         udelay(10);
6982                         *val = swab32(tr32(NVRAM_RDDATA));
6983                         break;
6984                 }
6985         }
6986
6987         tg3_nvram_unlock(tp);
6988
6989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6990                 u32 nvaccess = tr32(NVRAM_ACCESS);
6991
6992                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6993         }
6994
6995         if (i >= 1000)
6996                 return -EBUSY;
6997
6998         return 0;
6999 }
7000
7001 struct subsys_tbl_ent {
7002         u16 subsys_vendor, subsys_devid;
7003         u32 phy_id;
7004 };
7005
7006 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7007         /* Broadcom boards. */
7008         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7009         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7010         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7011         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7012         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7013         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7014         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7015         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7016         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7017         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7018         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7019
7020         /* 3com boards. */
7021         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7022         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7023         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7024         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7025         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7026
7027         /* DELL boards. */
7028         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7029         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7030         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7031         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7032
7033         /* Compaq boards. */
7034         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7035         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7036         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7037         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7038         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7039
7040         /* IBM boards. */
7041         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7042 };
7043
7044 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7045 {
7046         int i;
7047
7048         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7049                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7050                      tp->pdev->subsystem_vendor) &&
7051                     (subsys_id_to_phy_id[i].subsys_devid ==
7052                      tp->pdev->subsystem_device))
7053                         return &subsys_id_to_phy_id[i];
7054         }
7055         return NULL;
7056 }
7057
7058 static int __devinit tg3_phy_probe(struct tg3 *tp)
7059 {
7060         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7061         u32 hw_phy_id, hw_phy_id_masked;
7062         u32 val;
7063         int eeprom_signature_found, eeprom_phy_serdes, err;
7064
7065         tp->phy_id = PHY_ID_INVALID;
7066         eeprom_phy_id = PHY_ID_INVALID;
7067         eeprom_phy_serdes = 0;
7068         eeprom_signature_found = 0;
7069         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7070         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7071                 u32 nic_cfg, led_cfg;
7072                 u32 nic_phy_id, cfg2;
7073
7074                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7075                 tp->nic_sram_data_cfg = nic_cfg;
7076
7077                 eeprom_signature_found = 1;
7078
7079                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7080                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7081                         eeprom_phy_serdes = 1;
7082
7083                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7084                 if (nic_phy_id != 0) {
7085                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7086                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7087
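                        /* Repack the two 16-bit ID words from NVRAM into the
                         * driver's internal PHY_ID format.
                         */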
7088                         eeprom_phy_id  = (id1 >> 16) << 10;
7089                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7090                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7091                 } else
7092                         eeprom_phy_id = 0;
7093
7094                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7095                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
7096                         led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7097                                     SHASTA_EXT_LED_MODE_MASK);
7098                 } else
7099                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7100
7101                 switch (led_cfg) {
7102                 default:
7103                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7104                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7105                         break;
7106
7107                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7108                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7109                         break;
7110
7111                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7112                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7113                         break;
7114
7115                 case SHASTA_EXT_LED_SHARED:
7116                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7117                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7118                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7119                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7120                                                  LED_CTRL_MODE_PHY_2);
7121                         break;
7122
7123                 case SHASTA_EXT_LED_MAC:
7124                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7125                         break;
7126
7127                 case SHASTA_EXT_LED_COMBO:
7128                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7129                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7130                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7131                                                  LED_CTRL_MODE_PHY_2);
7132                         break;
7133
7134                 }
7135
7136                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7137                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7138                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7139                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7140
7141                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7142                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7143                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7144                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7145                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7146
7147                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7148                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7149                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7150                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7151                 }
7152                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7153                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7154
7155                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &cfg2);
7156                 if (cfg2 & (1 << 17))
7157                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7158         }
7159
7160         /* Reading the PHY ID register can conflict with ASF
7161          * firwmare access to the PHY hardware.
7162          */
7163         err = 0;
7164         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7165                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7166         } else {
7167                 /* Now read the physical PHY_ID from the chip and verify
7168                  * that it is sane.  If it doesn't look good, we fall back
7169                  * to the PHY_ID found in the eeprom area and, failing
7170                  * that, the hard-coded subsystem-ID table.
7171                  */
7172                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7173                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7174
7175                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7176                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7177                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7178
7179                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7180         }
7181
7182         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7183                 tp->phy_id = hw_phy_id;
7184                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7185                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7186         } else {
7187                 if (eeprom_signature_found) {
7188                         tp->phy_id = eeprom_phy_id;
7189                         if (eeprom_phy_serdes)
7190                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7191                 } else {
7192                         struct subsys_tbl_ent *p;
7193
7194                         /* No eeprom signature?  Try the hardcoded
7195                          * subsys device table.
7196                          */
7197                         p = lookup_by_subsys(tp);
7198                         if (!p)
7199                                 return -ENODEV;
7200
7201                         tp->phy_id = p->phy_id;
7202                         if (!tp->phy_id ||
7203                             tp->phy_id == PHY_ID_BCM8002)
7204                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7205                 }
7206         }
7207
7208         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7209             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7210                 u32 bmsr, adv_reg, tg3_ctrl;
7211
7212                 tg3_readphy(tp, MII_BMSR, &bmsr);
7213                 tg3_readphy(tp, MII_BMSR, &bmsr);
7214
7215                 if (bmsr & BMSR_LSTATUS)
7216                         goto skip_phy_reset;
7217                     
7218                 err = tg3_phy_reset(tp);
7219                 if (err)
7220                         return err;
7221
7222                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7223                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7224                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7225                 tg3_ctrl = 0;
7226                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7227                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7228                                     MII_TG3_CTRL_ADV_1000_FULL);
7229                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7230                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7231                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7232                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7233                 }
7234
7235                 if (!tg3_copper_is_advertising_all(tp)) {
7236                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7237
7238                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7239                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7240
7241                         tg3_writephy(tp, MII_BMCR,
7242                                      BMCR_ANENABLE | BMCR_ANRESTART);
7243                 }
7244                 tg3_phy_set_wirespeed(tp);
7245
7246                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7247                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7248                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7249         }
7250
7251 skip_phy_reset:
7252         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7253                 err = tg3_init_5401phy_dsp(tp);
7254                 if (err)
7255                         return err;
7256         }
7257
7258         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7259                 err = tg3_init_5401phy_dsp(tp);
7260         }
7261
7262         if (!eeprom_signature_found)
7263                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7264
7265         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7266                 tp->link_config.advertising =
7267                         (ADVERTISED_1000baseT_Half |
7268                          ADVERTISED_1000baseT_Full |
7269                          ADVERTISED_Autoneg |
7270                          ADVERTISED_FIBRE);
7271         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7272                 tp->link_config.advertising &=
7273                         ~(ADVERTISED_1000baseT_Half |
7274                           ADVERTISED_1000baseT_Full);
7275
7276         return err;
7277 }
7278
7279 static void __devinit tg3_read_partno(struct tg3 *tp)
7280 {
7281         unsigned char vpd_data[256];
7282         int i;
7283
7284         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7285                 /* Sun decided not to put the necessary bits in the
7286                  * NVRAM of their onboard tg3 parts :(
7287                  */
7288                 strcpy(tp->board_part_number, "Sun 570X");
7289                 return;
7290         }
7291
7292         for (i = 0; i < 256; i += 4) {
7293                 u32 tmp;
7294
7295                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7296                         goto out_not_found;
7297
7298                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7299                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7300                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7301                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7302         }
7303
7304         /* Now parse and find the part number. */
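        /* Walk the PCI VPD tags: skip the identifier-string (0x82) and
         * read-write (0x91) blocks, then scan the read-only (0x90) block
         * for the 'PN' (part number) keyword.
         */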
7305         for (i = 0; i < 256; ) {
7306                 unsigned char val = vpd_data[i];
7307                 int block_end;
7308
7309                 if (val == 0x82 || val == 0x91) {
7310                         i = (i + 3 +
7311                              (vpd_data[i + 1] +
7312                               (vpd_data[i + 2] << 8)));
7313                         continue;
7314                 }
7315
7316                 if (val != 0x90)
7317                         goto out_not_found;
7318
7319                 block_end = (i + 3 +
7320                              (vpd_data[i + 1] +
7321                               (vpd_data[i + 2] << 8)));
7322                 i += 3;
7323                 while (i < block_end) {
7324                         if (vpd_data[i + 0] == 'P' &&
7325                             vpd_data[i + 1] == 'N') {
7326                                 int partno_len = vpd_data[i + 2];
7327
7328                                 if (partno_len > 24)
7329                                         goto out_not_found;
7330
7331                                 memcpy(tp->board_part_number,
7332                                        &vpd_data[i + 3],
7333                                        partno_len);
7334
7335                                 /* Success. */
7336                                 return;
7337                         }
                        /* Not the 'PN' keyword: skip its 3-byte header plus
                         * data, otherwise this loop would never advance.
                         */
                        i += 3 + vpd_data[i + 2];
7338                 }
7339
7340                 /* Part number not found. */
7341                 goto out_not_found;
7342         }
7343
7344 out_not_found:
7345         strcpy(tp->board_part_number, "none");
7346 }
7347
7348 #ifdef CONFIG_SPARC64
7349 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7350 {
7351         struct pci_dev *pdev = tp->pdev;
7352         struct pcidev_cookie *pcp = pdev->sysdata;
7353
7354         if (pcp != NULL) {
7355                 int node = pcp->prom_node;
7356                 u32 venid;
7357                 int err;
7358
7359                 err = prom_getproperty(node, "subsystem-vendor-id",
7360                                        (char *) &venid, sizeof(venid));
7361                 if (err == 0 || err == -1)
7362                         return 0;
7363                 if (venid == PCI_VENDOR_ID_SUN)
7364                         return 1;
7365         }
7366         return 0;
7367 }
7368 #endif
7369
7370 static int __devinit tg3_get_invariants(struct tg3 *tp)
7371 {
7372         u32 misc_ctrl_reg;
7373         u32 cacheline_sz_reg;
7374         u32 pci_state_reg, grc_misc_cfg;
7375         u32 val;
7376         u16 pci_cmd;
7377         int err;
7378
7379 #ifdef CONFIG_SPARC64
7380         if (tg3_is_sun_570X(tp))
7381                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7382 #endif
7383
7384         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7385          * reordering to the mailbox registers done by the host
7386          * controller can cause major troubles.  We read back from
7387          * every mailbox register write to force the writes to be
7388          * posted to the chip in order.
7389          */
7390         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7391                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7392             pci_find_device(PCI_VENDOR_ID_INTEL,
7393                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7394             pci_find_device(PCI_VENDOR_ID_INTEL,
7395                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7396             pci_find_device(PCI_VENDOR_ID_INTEL,
7397                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7398             pci_find_device(PCI_VENDOR_ID_AMD,
7399                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7400                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7401
7402         /* Force memory write invalidate off.  If we leave it on,
7403          * then on 5700_BX chips we have to enable a workaround.
7404          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7405          * to match the cacheline size.  The Broadcom driver has this
7406          * workaround but turns MWI off all the time, so it is never
7407          * used.  This seems to suggest that the workaround is insufficient.
7408          */
7409         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7410         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7411         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7412
7413         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7414          * has the register indirect write enable bit set before
7415          * we try to access any of the MMIO registers.  It is also
7416          * critical that the PCI-X hw workaround situation is decided
7417          * before that as well.
7418          */
7419         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7420                               &misc_ctrl_reg);
7421
7422         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7423                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7424
7425         /* Initialize misc host control in PCI block. */
7426         tp->misc_host_ctrl |= (misc_ctrl_reg &
7427                                MISC_HOST_CTRL_CHIPREV);
7428         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7429                                tp->misc_host_ctrl);
7430
7431         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7432                               &cacheline_sz_reg);
7433
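        /* The register packs cache line size, latency timer, header type
         * and BIST into successive bytes, mirroring the standard PCI config
         * space layout.
         */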
7434         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7435         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7436         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7437         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7438
7439         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7440                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7441
7442         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7443             tp->pci_lat_timer < 64) {
7444                 tp->pci_lat_timer = 64;
7445
7446                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7447                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7448                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7449                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7450
7451                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7452                                        cacheline_sz_reg);
7453         }
7454
7455         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7456                               &pci_state_reg);
7457
7458         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7459                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7460
7461                 /* If this is a 5700 BX chipset, and we are in PCI-X
7462                  * mode, enable register write workaround.
7463                  *
7464                  * The workaround is to use indirect register accesses
7465                  * for all chip writes not to mailbox registers.
7466                  */
7467                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7468                         u32 pm_reg;
7469                         u16 pci_cmd;
7470
7471                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7472
7473                         /* The chip can have its power management PCI config
7474                          * space registers clobbered due to this bug.
7475                          * So explicitly force the chip into D0 here.
7476                          */
7477                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7478                                               &pm_reg);
7479                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7480                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7481                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7482                                                pm_reg);
7483
7484                         /* Also, force SERR#/PERR# in PCI command. */
7485                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7486                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7487                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7488                 }
7489         }
7490
7491         /* Back to back register writes can cause problems on this chip,
7492          * the workaround is to read back all reg writes except those to
7493          * mailbox regs.  See tg3_write_indirect_reg32().
7494          *
7495          * PCI Express 5750_A0 rev chips need this workaround too.
7496          */
7497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7498             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7499              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7500                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7501
7502         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7503                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7504         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7505                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7506
7507         /* Chip-specific fixup from Broadcom driver */
7508         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7509             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7510                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7511                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7512         }
7513
7514         /* Force the chip into D0. */
7515         err = tg3_set_power_state(tp, 0);
7516         if (err) {
7517                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7518                        pci_name(tp->pdev));
7519                 return err;
7520         }
7521
7522         /* 5700 B0 chips do not support checksumming correctly due
7523          * to hardware bugs.
7524          */
7525         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7526                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7527
7528         /* Pseudo-header checksum is done by hardware logic and not
7529          * the offload processors, so make the chip do the pseudo-
7530          * header checksums on receive.  For transmit it is more
7531          * convenient to do the pseudo-header checksum in software
7532          * as Linux does that on transmit for us in all cases.
7533          */
7534         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7535         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7536
7537         /* Derive initial jumbo mode from MTU assigned in
7538          * ether_setup() via the alloc_etherdev() call
7539          */
7540         if (tp->dev->mtu > ETH_DATA_LEN)
7541                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7542
7543         /* Determine WakeOnLan speed to use. */
7544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7545             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7546             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7547             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7548                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7549         } else {
7550                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7551         }
7552
7553         /* A few boards don't want the Ethernet@WireSpeed phy feature */
7554         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7555             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7556              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7557              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7558                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7559
7560         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7561             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7562                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7563         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7564                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7565
7566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7568                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7569
7570         /* Only 5701 and later support tagged irq status mode.
7571          * Also, 5788 chips cannot use tagged irq status.
7572          *
7573          * However, since we are using NAPI, we avoid tagged irq status
7574          * because the interrupt condition is more difficult to
7575          * fully clear in that mode.
7576          */
7577         tp->coalesce_mode = 0;
7578
7579         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7580             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7581                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7582
7583         /* Initialize MAC MI mode, polling disabled. */
7584         tw32_f(MAC_MI_MODE, tp->mi_mode);
7585         udelay(80);
7586
7587         /* Initialize data/descriptor byte/word swapping. */
7588         val = tr32(GRC_MODE);
7589         val &= GRC_MODE_HOST_STACKUP;
7590         tw32(GRC_MODE, val | tp->grc_mode);
7591
7592         tg3_switch_clocks(tp);
7593
7594         /* Clear this out for sanity. */
7595         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7596
7597         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7598                               &pci_state_reg);
7599         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7600             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7601                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7602
7603                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7604                     chiprevid == CHIPREV_ID_5701_B0 ||
7605                     chiprevid == CHIPREV_ID_5701_B2 ||
7606                     chiprevid == CHIPREV_ID_5701_B5) {
7607                         void __iomem *sram_base;
7608
7609                         /* Write some dummy words into the SRAM status block
7610                          * area, see if it reads back correctly.  If the return
7611                          * value is bad, force enable the PCIX workaround.
7612                          */
7613                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7614
7615                         writel(0x00000000, sram_base);
7616                         writel(0x00000000, sram_base + 4);
7617                         writel(0xffffffff, sram_base + 4);
7618                         if (readl(sram_base) != 0x00000000)
7619                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7620                 }
7621         }
7622
7623         udelay(50);
7624         tg3_nvram_init(tp);
7625
7626         grc_misc_cfg = tr32(GRC_MISC_CFG);
7627         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7628
7629         /* Broadcom's driver says that CIOBE multisplit has a bug */
7630 #if 0
7631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7632             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7633                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7634                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7635         }
7636 #endif
7637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7638             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7639              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7640                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7641
7642         /* these are limited to 10/100 only */
7643         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7644              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7645             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7646              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7647              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7648               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7649               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7650             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7651              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
7652               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
7653                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7654
7655         err = tg3_phy_probe(tp);
7656         if (err) {
7657                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7658                        pci_name(tp->pdev), err);
7659                 /* ... but do not return immediately ... */
7660         }
7661
7662         tg3_read_partno(tp);
7663
7664         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7665                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7666         } else {
7667                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7668                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7669                 else
7670                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7671         }
7672
7673         /* 5700 {AX,BX} chips have a broken status block link
7674          * change bit implementation, so we must use the
7675          * status register in those cases.
7676          */
7677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7678                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7679         else
7680                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7681
7682         /* The led_ctrl is set during tg3_phy_probe; here we might
7683          * have to force the link status polling mechanism based
7684          * upon subsystem IDs.
7685          */
7686         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7687             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7688                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7689                                   TG3_FLAG_USE_LINKCHG_REG);
7690         }
7691
7692         /* For all SERDES we poll the MAC status register. */
7693         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7694                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7695         else
7696                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7697
7698         /* 5700 BX chips need to have their TX producer index mailboxes
7699          * written twice to work around a hardware bug.
7700          */
7701         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7702                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7703         else
7704                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7705
7706         /* It seems all chips can get confused if TX buffers
7707          * straddle the 4GB address boundary in some cases.
7708          */
7709         tp->dev->hard_start_xmit = tg3_start_xmit;
7710
7711         tp->rx_offset = 2;
7712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7713             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7714                 tp->rx_offset = 0;
7715
7716         /* By default, disable wake-on-lan.  User can change this
7717          * using ETHTOOL_SWOL.
7718          */
7719         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7720
7721         return err;
7722 }
7723
7724 #ifdef CONFIG_SPARC64
7725 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7726 {
7727         struct net_device *dev = tp->dev;
7728         struct pci_dev *pdev = tp->pdev;
7729         struct pcidev_cookie *pcp = pdev->sysdata;
7730
7731         if (pcp != NULL) {
7732                 int node = pcp->prom_node;
7733
7734                 if (prom_getproplen(node, "local-mac-address") == 6) {
7735                         prom_getproperty(node, "local-mac-address",
7736                                          dev->dev_addr, 6);
7737                         return 0;
7738                 }
7739         }
7740         return -ENODEV;
7741 }
7742
7743 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7744 {
7745         struct net_device *dev = tp->dev;
7746
7747         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7748         return 0;
7749 }
7750 #endif
7751
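     /* Determine the permanent MAC address.  Sources are tried in order:
      * the OpenFirmware "local-mac-address" property (sparc64), the NIC
      * SRAM mailbox left by the bootstrap firmware, the NVRAM copy, and
      * finally whatever is already programmed into the MAC address
      * registers.
      */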
7752 static int __devinit tg3_get_device_address(struct tg3 *tp)
7753 {
7754         struct net_device *dev = tp->dev;
7755         u32 hi, lo, mac_offset;
7756
7757 #ifdef CONFIG_SPARC64
7758         if (!tg3_get_macaddr_sparc(tp))
7759                 return 0;
7760 #endif
7761
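             /* The MAC address normally lives at NVRAM offset 0x7c; the
              * second port of a 5704 keeps its copy at offset 0xcc instead.
              */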
7762         mac_offset = 0x7c;
7763         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7764             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
7765                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7766                         mac_offset = 0xcc;
7767                 if (tg3_nvram_lock(tp))
7768                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7769                 else
7770                         tg3_nvram_unlock(tp);
7771         }
7772
7773         /* First try to get it from MAC address mailbox. */
7774         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
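             /* The upper 16 bits read back as the ASCII signature "HK"
              * (0x484b) when the mailbox holds a valid MAC address,
              * presumably placed there by the boot firmware.
              */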
7775         if ((hi >> 16) == 0x484b) {
7776                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7777                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7778
7779                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7780                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7781                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7782                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7783                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7784         }
7785         /* Next, try NVRAM. */
7786         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
7787                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7788                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7789                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7790                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7791                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7792                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7793                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7794                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7795         }
7796         /* Finally just fetch it out of the MAC control regs. */
7797         else {
7798                 hi = tr32(MAC_ADDR_0_HIGH);
7799                 lo = tr32(MAC_ADDR_0_LOW);
7800
7801                 dev->dev_addr[5] = lo & 0xff;
7802                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7803                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7804                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7805                 dev->dev_addr[1] = hi & 0xff;
7806                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7807         }
7808
7809         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7810 #ifdef CONFIG_SPARC64
7811                 if (!tg3_get_default_macaddr_sparc(tp))
7812                         return 0;
7813 #endif
7814                 return -EINVAL;
7815         }
7816         return 0;
7817 }
7818
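     /* Perform one host <-> NIC DMA transfer of 'size' bytes using the
      * chip's read (host to NIC) or write (NIC to host) DMA engine.  A
      * single buffer descriptor is written into the NIC SRAM descriptor
      * pool and queued on the appropriate FTQ, then the completion FIFO
      * is polled for a few milliseconds.  Returns 0 on success, -ENODEV
      * if the completion never shows up.
      */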
7819 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7820 {
7821         struct tg3_internal_buffer_desc test_desc;
7822         u32 sram_dma_descs;
7823         int i, ret;
7824
7825         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7826
7827         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7828         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7829         tw32(RDMAC_STATUS, 0);
7830         tw32(WDMAC_STATUS, 0);
7831
7832         tw32(BUFMGR_MODE, 0);
7833         tw32(FTQ_RESET, 0);
7834
7835         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7836         test_desc.addr_lo = buf_dma & 0xffffffff;
7837         test_desc.nic_mbuf = 0x00002100;
7838         test_desc.len = size;
7839
7840         /*
7841          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7842          * the *second* time the tg3 driver was getting loaded after an
7843          * initial scan.
7844          *
7845          * Broadcom tells me:
7846          *   ...the DMA engine is connected to the GRC block and a DMA
7847          *   reset may affect the GRC block in some unpredictable way...
7848          *   The behavior of resets to individual blocks has not been tested.
7849          *
7850          * Broadcom noted the GRC reset will also reset all sub-components.
7851          */
7852         if (to_device) {
7853                 test_desc.cqid_sqid = (13 << 8) | 2;
7854
7855                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7856                 udelay(40);
7857         } else {
7858                 test_desc.cqid_sqid = (16 << 8) | 7;
7859
7860                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7861                 udelay(40);
7862         }
7863         test_desc.flags = 0x00000005;
7864
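             /* Copy the descriptor into the NIC SRAM descriptor pool via
              * the indirect memory window in PCI config space.
              */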
7865         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7866                 u32 val;
7867
7868                 val = *(((u32 *)&test_desc) + i);
7869                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7870                                        sram_dma_descs + (i * sizeof(u32)));
7871                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7872         }
7873         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7874
7875         if (to_device) {
7876                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7877         } else {
7878                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7879         }
7880
7881         ret = -ENODEV;
7882         for (i = 0; i < 40; i++) {
7883                 u32 val;
7884
7885                 if (to_device)
7886                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7887                 else
7888                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7889                 if ((val & 0xffff) == sram_dma_descs) {
7890                         ret = 0;
7891                         break;
7892                 }
7893
7894                 udelay(100);
7895         }
7896
7897         return ret;
7898 }
7899
7900 #define TEST_BUFFER_SIZE        0x400
7901
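     /* Choose DMA read/write control settings that work with this host
      * bridge.  A candidate dma_rwctrl value is derived from the PCI
      * cache line size and bus type; on 5700/5701 a test pattern is then
      * DMAed to the chip and back, and the write boundary is tightened
      * if corruption is seen.
      */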
7902 static int __devinit tg3_test_dma(struct tg3 *tp)
7903 {
7904         dma_addr_t buf_dma;
7905         u32 *buf;
7906         int ret;
7907
7908         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7909         if (!buf) {
7910                 ret = -ENOMEM;
7911                 goto out_nofree;
7912         }
7913
7914         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7915                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7916
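             /* On non-x86 platforms, pick a DMA write boundary based on
              * the PCI cache line size; x86 keeps the default (write
              * boundary disabled).
              */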
7917 #ifndef CONFIG_X86
7918         {
7919                 u8 byte;
7920                 int cacheline_size;
7921                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7922
7923                 if (byte == 0)
7924                         cacheline_size = 1024;
7925                 else
7926                         cacheline_size = (int) byte * 4;
7927
7928                 switch (cacheline_size) {
7929                 case 16:
7930                 case 32:
7931                 case 64:
7932                 case 128:
7933                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7934                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7935                                 tp->dma_rwctrl |=
7936                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7937                                 break;
7938                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7939                                 tp->dma_rwctrl &=
7940                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
7941                                 tp->dma_rwctrl |=
7942                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7943                                 break;
7944                         }
7945                         /* fallthrough */
7946                 case 256:
7947                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7948                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7949                                 tp->dma_rwctrl |=
7950                                         DMA_RWCTRL_WRITE_BNDRY_256;
7951                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7952                                 tp->dma_rwctrl |=
7953                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7954                 }
7955         }
7956 #endif
7957
7958         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7959                 /* DMA read watermark not used on PCIE */
7960                 tp->dma_rwctrl |= 0x00180000;
7961         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7962                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7963                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7964                         tp->dma_rwctrl |= 0x003f0000;
7965                 else
7966                         tp->dma_rwctrl |= 0x003f000f;
7967         } else {
7968                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7969                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7970                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7971
7972                         if (ccval == 0x6 || ccval == 0x7)
7973                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7974
7975                         /* Set bit 23 to re-enable the PCIX hw bug fix */
7976                         tp->dma_rwctrl |= 0x009f0000;
7977                 } else {
7978                         tp->dma_rwctrl |= 0x001b000f;
7979                 }
7980         }
7981
7982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7983             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7984                 tp->dma_rwctrl &= 0xfffffff0;
7985
7986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7987             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7988                 /* Remove this if it causes problems for some boards. */
7989                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7990
7991                 /* On 5700/5701 chips, we need to set this bit.
7992                  * Otherwise the chip will issue cacheline transactions
7993                  * to streamable DMA memory without all of the byte
7994                  * enables turned on.  This is an error on several
7995                  * RISC PCI controllers, in particular sparc64.
7996                  *
7997                  * On 5703/5704 chips, this bit has been reassigned
7998                  * a different meaning.  In particular, it is used
7999                  * on those chips to enable a PCI-X workaround.
8000                  */
8001                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8002         }
8003
8004         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8005
8006 #if 0
8007         /* Unneeded, already done by tg3_get_invariants.  */
8008         tg3_switch_clocks(tp);
8009 #endif
8010
8011         ret = 0;
8012         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8013             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8014                 goto out;
8015
8016         while (1) {
8017                 u32 *p = buf, i;
8018
8019                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8020                         p[i] = i;
8021
8022                 /* Send the buffer to the chip. */
8023                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8024                 if (ret) {
8025                         printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err = %d\n", ret);
8026                         break;
8027                 }
8028
8029 #if 0
8030                 /* validate data reached card RAM correctly. */
8031                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8032                         u32 val;
8033                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8034                         if (le32_to_cpu(val) != p[i]) {
8035                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8036                                 /* ret = -ENODEV here? */
8037                         }
8038                         p[i] = 0;
8039                 }
8040 #endif
8041                 /* Now read it back. */
8042                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8043                 if (ret) {
8044                         printk(KERN_ERR "tg3_test_dma() read of test buffer failed, err = %d\n", ret);
8045
8046                         break;
8047                 }
8048
8049                 /* Verify it. */
8050                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8051                         if (p[i] == i)
8052                                 continue;
8053
8054                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8055                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8056                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8057                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8058                                 break;
8059                         } else {
8060                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8061                                 ret = -ENODEV;
8062                                 goto out;
8063                         }
8064                 }
8065
8066                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8067                         /* Success. */
8068                         ret = 0;
8069                         break;
8070                 }
8071         }
8072
8073 out:
8074         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8075 out_nofree:
8076         return ret;
8077 }
8078
8079 static void __devinit tg3_init_link_config(struct tg3 *tp)
8080 {
8081         tp->link_config.advertising =
8082                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8083                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8084                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8085                  ADVERTISED_Autoneg | ADVERTISED_MII);
8086         tp->link_config.speed = SPEED_INVALID;
8087         tp->link_config.duplex = DUPLEX_INVALID;
8088         tp->link_config.autoneg = AUTONEG_ENABLE;
8089         netif_carrier_off(tp->dev);
8090         tp->link_config.active_speed = SPEED_INVALID;
8091         tp->link_config.active_duplex = DUPLEX_INVALID;
8092         tp->link_config.phy_is_low_power = 0;
8093         tp->link_config.orig_speed = SPEED_INVALID;
8094         tp->link_config.orig_duplex = DUPLEX_INVALID;
8095         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8096 }
8097
8098 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8099 {
8100         tp->bufmgr_config.mbuf_read_dma_low_water =
8101                 DEFAULT_MB_RDMA_LOW_WATER;
8102         tp->bufmgr_config.mbuf_mac_rx_low_water =
8103                 DEFAULT_MB_MACRX_LOW_WATER;
8104         tp->bufmgr_config.mbuf_high_water =
8105                 DEFAULT_MB_HIGH_WATER;
8106
8107         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8108                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8109         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8110                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8111         tp->bufmgr_config.mbuf_high_water_jumbo =
8112                 DEFAULT_MB_HIGH_WATER_JUMBO;
8113
8114         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8115         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8116 }
8117
8118 static char * __devinit tg3_phy_string(struct tg3 *tp)
8119 {
8120         switch (tp->phy_id & PHY_ID_MASK) {
8121         case PHY_ID_BCM5400:    return "5400";
8122         case PHY_ID_BCM5401:    return "5401";
8123         case PHY_ID_BCM5411:    return "5411";
8124         case PHY_ID_BCM5701:    return "5701";
8125         case PHY_ID_BCM5703:    return "5703";
8126         case PHY_ID_BCM5704:    return "5704";
8127         case PHY_ID_BCM5705:    return "5705";
8128         case PHY_ID_BCM5750:    return "5750";
8129         case PHY_ID_BCM8002:    return "8002/serdes";
8130         case 0:                 return "serdes";
8131         default:                return "unknown";
8132         }
8133 }
8134
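     /* The 5704 is a dual-port part; its two ports appear as separate PCI
      * functions in the same slot.  Locate the other function (the "peer").
      */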
8135 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8136 {
8137         struct pci_dev *peer;
8138         unsigned int func, devnr = tp->pdev->devfn & ~7;
8139
8140         for (func = 0; func < 8; func++) {
8141                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8142                 if (peer && peer != tp->pdev)
8143                         break;
8144                 pci_dev_put(peer);
8145         }
8146         if (!peer || peer == tp->pdev)
8147                 BUG();
8148
8149         /*
8150          * We don't need to keep the refcount elevated; there's no way
8151          * to remove one half of this device without removing the other.
8152          */
8153         pci_dev_put(peer);
8154
8155         return peer;
8156 }
8157
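     /* PCI probe entry point: map the register BAR, gather chip
      * invariants, read the MAC address, run the DMA engine test and
      * register the net_device.
      */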
8158 static int __devinit tg3_init_one(struct pci_dev *pdev,
8159                                   const struct pci_device_id *ent)
8160 {
8161         static int tg3_version_printed = 0;
8162         unsigned long tg3reg_base, tg3reg_len;
8163         struct net_device *dev;
8164         struct tg3 *tp;
8165         int i, err, pci_using_dac, pm_cap;
8166
8167         if (tg3_version_printed++ == 0)
8168                 printk(KERN_INFO "%s", version);
8169
8170         err = pci_enable_device(pdev);
8171         if (err) {
8172                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8173                        "aborting.\n");
8174                 return err;
8175         }
8176
8177         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8178                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8179                        "base address, aborting.\n");
8180                 err = -ENODEV;
8181                 goto err_out_disable_pdev;
8182         }
8183
8184         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8185         if (err) {
8186                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8187                        "aborting.\n");
8188                 goto err_out_disable_pdev;
8189         }
8190
8191         pci_set_master(pdev);
8192
8193         /* Find power-management capability. */
8194         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8195         if (pm_cap == 0) {
8196                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8197                        "aborting.\n");
8198                 err = -EIO;
8199                 goto err_out_free_res;
8200         }
8201
8202         /* Configure DMA attributes. */
8203         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8204         if (!err) {
8205                 pci_using_dac = 1;
8206                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8207                 if (err < 0) {
8208                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8209                                "for consistent allocations\n");
8210                         goto err_out_free_res;
8211                 }
8212         } else {
8213                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8214                 if (err) {
8215                         printk(KERN_ERR PFX "No usable DMA configuration, "
8216                                "aborting.\n");
8217                         goto err_out_free_res;
8218                 }
8219                 pci_using_dac = 0;
8220         }
8221
8222         tg3reg_base = pci_resource_start(pdev, 0);
8223         tg3reg_len = pci_resource_len(pdev, 0);
8224
8225         dev = alloc_etherdev(sizeof(*tp));
8226         if (!dev) {
8227                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8228                 err = -ENOMEM;
8229                 goto err_out_free_res;
8230         }
8231
8232         SET_MODULE_OWNER(dev);
8233         SET_NETDEV_DEV(dev, &pdev->dev);
8234
8235         if (pci_using_dac)
8236                 dev->features |= NETIF_F_HIGHDMA;
8237         dev->features |= NETIF_F_LLTX;
8238 #if TG3_VLAN_TAG_USED
8239         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8240         dev->vlan_rx_register = tg3_vlan_rx_register;
8241         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8242 #endif
8243
8244         tp = netdev_priv(dev);
8245         tp->pdev = pdev;
8246         tp->dev = dev;
8247         tp->pm_cap = pm_cap;
8248         tp->mac_mode = TG3_DEF_MAC_MODE;
8249         tp->rx_mode = TG3_DEF_RX_MODE;
8250         tp->tx_mode = TG3_DEF_TX_MODE;
8251         tp->mi_mode = MAC_MI_MODE_BASE;
8252         if (tg3_debug > 0)
8253                 tp->msg_enable = tg3_debug;
8254         else
8255                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8256
8257         /* The word/byte swap controls here control register access byte
8258          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8259          * setting below.
8260          */
8261         tp->misc_host_ctrl =
8262                 MISC_HOST_CTRL_MASK_PCI_INT |
8263                 MISC_HOST_CTRL_WORD_SWAP |
8264                 MISC_HOST_CTRL_INDIR_ACCESS |
8265                 MISC_HOST_CTRL_PCISTATE_RW;
8266
8267         /* The NONFRM (non-frame) byte/word swap controls take effect
8268          * on descriptor entries, anything which isn't packet data.
8269          *
8270          * The StrongARM chips on the board (one for tx, one for rx)
8271          * are running in big-endian mode.
8272          */
8273         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8274                         GRC_MODE_WSWAP_NONFRM_DATA);
8275 #ifdef __BIG_ENDIAN
8276         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8277 #endif
8278         spin_lock_init(&tp->lock);
8279         spin_lock_init(&tp->tx_lock);
8280         spin_lock_init(&tp->indirect_lock);
8281         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8282
8283         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8284         if (tp->regs == 0UL) {
8285                 printk(KERN_ERR PFX "Cannot map device registers, "
8286                        "aborting.\n");
8287                 err = -ENOMEM;
8288                 goto err_out_free_dev;
8289         }
8290
8291         tg3_init_link_config(tp);
8292
8293         tg3_init_bufmgr_config(tp);
8294
8295         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8296         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8297         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8298
8299         dev->open = tg3_open;
8300         dev->stop = tg3_close;
8301         dev->get_stats = tg3_get_stats;
8302         dev->set_multicast_list = tg3_set_rx_mode;
8303         dev->set_mac_address = tg3_set_mac_addr;
8304         dev->do_ioctl = tg3_ioctl;
8305         dev->tx_timeout = tg3_tx_timeout;
8306         dev->poll = tg3_poll;
8307         dev->ethtool_ops = &tg3_ethtool_ops;
8308         dev->weight = 64;
8309         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8310         dev->change_mtu = tg3_change_mtu;
8311         dev->irq = pdev->irq;
8312 #ifdef CONFIG_NET_POLL_CONTROLLER
8313         dev->poll_controller = tg3_poll_controller;
8314 #endif
8315
8316         err = tg3_get_invariants(tp);
8317         if (err) {
8318                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8319                        "aborting.\n");
8320                 goto err_out_iounmap;
8321         }
8322
8323         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8324             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8325                 tp->bufmgr_config.mbuf_read_dma_low_water =
8326                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8327                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8328                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8329                 tp->bufmgr_config.mbuf_high_water =
8330                         DEFAULT_MB_HIGH_WATER_5705;
8331         }
8332
8333 #if TG3_TSO_SUPPORT != 0
8334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8335             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8336             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8337             ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8338              GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8339                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8340         } else {
8341                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8342         }
8343
8344         /* TSO is off by default; the user can enable it using ethtool.  */
8345 #if 0
8346         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8347                 dev->features |= NETIF_F_TSO;
8348 #endif
8349
8350 #endif
8351
8352         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8353             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8354             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8355                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8356                 tp->rx_pending = 63;
8357         }
8358
8359         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8360                 tp->pdev_peer = tg3_find_5704_peer(tp);
8361
8362         err = tg3_get_device_address(tp);
8363         if (err) {
8364                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8365                        "aborting.\n");
8366                 goto err_out_iounmap;
8367         }
8368
8369         /*
8370          * Reset chip in case UNDI or EFI driver did not shut it down
8371          * cleanly.  Otherwise the DMA self test will enable WDMAC and
8372          * we'll see (spurious) pending DMA on the PCI bus at that point.
8373          */
8374         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8375             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8376                 pci_save_state(tp->pdev);
8377                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8378                 tg3_halt(tp);
8379         }
8380
8381         err = tg3_test_dma(tp);
8382         if (err) {
8383                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8384                 goto err_out_iounmap;
8385         }
8386
8387         /* Tigon3 can only do IPv4 checksum offload... and some chips
8388          * have buggy checksumming.
8389          */
8390         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8391                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8392                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8393         } else
8394                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8395
8396         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8397                 dev->features &= ~NETIF_F_HIGHDMA;
8398
8399         /* flow control autonegotiation is default behavior */
8400         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8401
8402         err = register_netdev(dev);
8403         if (err) {
8404                 printk(KERN_ERR PFX "Cannot register net device, "
8405                        "aborting.\n");
8406                 goto err_out_iounmap;
8407         }
8408
8409         pci_set_drvdata(pdev, dev);
8410
8411         /* Now that we have fully set up the chip, save away a snapshot
8412          * of the PCI config space.  We need to restore this after
8413          * GRC_MISC_CFG core clock resets and some resume events.
8414          */
8415         pci_save_state(tp->pdev);
8416
8417         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8418                dev->name,
8419                tp->board_part_number,
8420                tp->pci_chip_rev_id,
8421                tg3_phy_string(tp),
8422                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8423                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8424                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8425                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8426                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8427                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8428
8429         for (i = 0; i < 6; i++)
8430                 printk("%2.2x%c", dev->dev_addr[i],
8431                        i == 5 ? '\n' : ':');
8432
8433         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8434                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8435                "TSOcap[%d]\n",
8436                dev->name,
8437                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8438                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8439                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8440                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8441                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8442                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8443                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8444
8445         return 0;
8446
8447 err_out_iounmap:
8448         iounmap(tp->regs);
8449
8450 err_out_free_dev:
8451         free_netdev(dev);
8452
8453 err_out_free_res:
8454         pci_release_regions(pdev);
8455
8456 err_out_disable_pdev:
8457         pci_disable_device(pdev);
8458         pci_set_drvdata(pdev, NULL);
8459         return err;
8460 }
8461
8462 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8463 {
8464         struct net_device *dev = pci_get_drvdata(pdev);
8465
8466         if (dev) {
8467                 struct tg3 *tp = netdev_priv(dev);
8468
8469                 unregister_netdev(dev);
8470                 iounmap(tp->regs);
8471                 free_netdev(dev);
8472                 pci_release_regions(pdev);
8473                 pci_disable_device(pdev);
8474                 pci_set_drvdata(pdev, NULL);
8475         }
8476 }
8477
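     /* Power management hooks: quiesce and halt the chip on suspend,
      * then reprogram it from scratch on resume.
      */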
8478 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8479 {
8480         struct net_device *dev = pci_get_drvdata(pdev);
8481         struct tg3 *tp = netdev_priv(dev);
8482         int err;
8483
8484         if (!netif_running(dev))
8485                 return 0;
8486
8487         tg3_netif_stop(tp);
8488
8489         del_timer_sync(&tp->timer);
8490
8491         spin_lock_irq(&tp->lock);
8492         spin_lock(&tp->tx_lock);
8493         tg3_disable_ints(tp);
8494         spin_unlock(&tp->tx_lock);
8495         spin_unlock_irq(&tp->lock);
8496
8497         netif_device_detach(dev);
8498
8499         spin_lock_irq(&tp->lock);
8500         spin_lock(&tp->tx_lock);
8501         tg3_halt(tp);
8502         spin_unlock(&tp->tx_lock);
8503         spin_unlock_irq(&tp->lock);
8504
8505         err = tg3_set_power_state(tp, state);
8506         if (err) {
8507                 spin_lock_irq(&tp->lock);
8508                 spin_lock(&tp->tx_lock);
8509
8510                 tg3_init_hw(tp);
8511
8512                 tp->timer.expires = jiffies + tp->timer_offset;
8513                 add_timer(&tp->timer);
8514
8515                 netif_device_attach(dev);
8516                 tg3_netif_start(tp);
8517
8518                 spin_unlock(&tp->tx_lock);
8519                 spin_unlock_irq(&tp->lock);
8520         }
8521
8522         return err;
8523 }
8524
8525 static int tg3_resume(struct pci_dev *pdev)
8526 {
8527         struct net_device *dev = pci_get_drvdata(pdev);
8528         struct tg3 *tp = netdev_priv(dev);
8529         int err;
8530
8531         if (!netif_running(dev))
8532                 return 0;
8533
8534         pci_restore_state(tp->pdev);
8535
8536         err = tg3_set_power_state(tp, 0);
8537         if (err)
8538                 return err;
8539
8540         netif_device_attach(dev);
8541
8542         spin_lock_irq(&tp->lock);
8543         spin_lock(&tp->tx_lock);
8544
8545         tg3_init_hw(tp);
8546
8547         tp->timer.expires = jiffies + tp->timer_offset;
8548         add_timer(&tp->timer);
8549
8550         tg3_enable_ints(tp);
8551
8552         tg3_netif_start(tp);
8553
8554         spin_unlock(&tp->tx_lock);
8555         spin_unlock_irq(&tp->lock);
8556
8557         return 0;
8558 }
8559
8560 static struct pci_driver tg3_driver = {
8561         .name           = DRV_MODULE_NAME,
8562         .id_table       = tg3_pci_tbl,
8563         .probe          = tg3_init_one,
8564         .remove         = __devexit_p(tg3_remove_one),
8565         .suspend        = tg3_suspend,
8566         .resume         = tg3_resume
8567 };
8568
8569 static int __init tg3_init(void)
8570 {
8571         return pci_module_init(&tg3_driver);
8572 }
8573
8574 static void __exit tg3_cleanup(void)
8575 {
8576         pci_unregister_driver(&tg3_driver);
8577 }
8578
8579 module_init(tg3_init);
8580 module_exit(tg3_cleanup);