upgrade to fedora-2.6.12-1.1398.FC4 + vserver 2.0.rc7
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.31"
70 #define DRV_MODULE_RELDATE      "June 8, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154 MODULE_VERSION(DRV_MODULE_VERSION);
155
156 static struct pci_device_id tg3_pci_tbl[] = {
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
234           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
236           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241         { 0, }
242 };
243
244 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
245
246 static struct {
247         const char string[ETH_GSTRING_LEN];
248 } ethtool_stats_keys[TG3_NUM_STATS] = {
249         { "rx_octets" },
250         { "rx_fragments" },
251         { "rx_ucast_packets" },
252         { "rx_mcast_packets" },
253         { "rx_bcast_packets" },
254         { "rx_fcs_errors" },
255         { "rx_align_errors" },
256         { "rx_xon_pause_rcvd" },
257         { "rx_xoff_pause_rcvd" },
258         { "rx_mac_ctrl_rcvd" },
259         { "rx_xoff_entered" },
260         { "rx_frame_too_long_errors" },
261         { "rx_jabbers" },
262         { "rx_undersize_packets" },
263         { "rx_in_length_errors" },
264         { "rx_out_length_errors" },
265         { "rx_64_or_less_octet_packets" },
266         { "rx_65_to_127_octet_packets" },
267         { "rx_128_to_255_octet_packets" },
268         { "rx_256_to_511_octet_packets" },
269         { "rx_512_to_1023_octet_packets" },
270         { "rx_1024_to_1522_octet_packets" },
271         { "rx_1523_to_2047_octet_packets" },
272         { "rx_2048_to_4095_octet_packets" },
273         { "rx_4096_to_8191_octet_packets" },
274         { "rx_8192_to_9022_octet_packets" },
275
276         { "tx_octets" },
277         { "tx_collisions" },
278
279         { "tx_xon_sent" },
280         { "tx_xoff_sent" },
281         { "tx_flow_control" },
282         { "tx_mac_errors" },
283         { "tx_single_collisions" },
284         { "tx_mult_collisions" },
285         { "tx_deferred" },
286         { "tx_excessive_collisions" },
287         { "tx_late_collisions" },
288         { "tx_collide_2times" },
289         { "tx_collide_3times" },
290         { "tx_collide_4times" },
291         { "tx_collide_5times" },
292         { "tx_collide_6times" },
293         { "tx_collide_7times" },
294         { "tx_collide_8times" },
295         { "tx_collide_9times" },
296         { "tx_collide_10times" },
297         { "tx_collide_11times" },
298         { "tx_collide_12times" },
299         { "tx_collide_13times" },
300         { "tx_collide_14times" },
301         { "tx_collide_15times" },
302         { "tx_ucast_packets" },
303         { "tx_mcast_packets" },
304         { "tx_bcast_packets" },
305         { "tx_carrier_sense_errors" },
306         { "tx_discards" },
307         { "tx_errors" },
308
309         { "dma_writeq_full" },
310         { "dma_write_prioq_full" },
311         { "rxbds_empty" },
312         { "rx_discards" },
313         { "rx_errors" },
314         { "rx_threshold_hit" },
315
316         { "dma_readq_full" },
317         { "dma_read_prioq_full" },
318         { "tx_comp_queue_full" },
319
320         { "ring_set_send_prod_index" },
321         { "ring_status_update" },
322         { "nic_irqs" },
323         { "nic_avoided_irqs" },
324         { "nic_tx_threshold_hit" }
325 };
326
327 static struct {
328         const char string[ETH_GSTRING_LEN];
329 } ethtool_test_keys[TG3_NUM_TEST] = {
330         { "nvram test     (online) " },
331         { "link test      (online) " },
332         { "register test  (offline)" },
333         { "memory test    (offline)" },
334         { "loopback test  (offline)" },
335         { "interrupt test (offline)" },
336 };
337
338 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339 {
340         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
341                 unsigned long flags;
342
343                 spin_lock_irqsave(&tp->indirect_lock, flags);
344                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
345                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
346                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
347         } else {
348                 writel(val, tp->regs + off);
349                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
350                         readl(tp->regs + off);
351         }
352 }
353
354 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
355 {
356         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
357                 unsigned long flags;
358
359                 spin_lock_irqsave(&tp->indirect_lock, flags);
360                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
362                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
363         } else {
364                 void __iomem *dest = tp->regs + off;
365                 writel(val, dest);
366                 readl(dest);    /* always flush PCI write */
367         }
368 }
369
370 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
371 {
372         void __iomem *mbox = tp->regs + off;
373         writel(val, mbox);
374         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
375                 readl(mbox);
376 }
377
378 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
379 {
380         void __iomem *mbox = tp->regs + off;
381         writel(val, mbox);
382         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
383                 writel(val, mbox);
384         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
385                 readl(mbox);
386 }
387
388 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
389 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
390 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
391
392 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
393 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
394 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
395 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
396 #define tr32(reg)               readl(tp->regs + (reg))
397 #define tr16(reg)               readw(tp->regs + (reg))
398 #define tr8(reg)                readb(tp->regs + (reg))
399
400 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
401 {
402         unsigned long flags;
403
404         spin_lock_irqsave(&tp->indirect_lock, flags);
405         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
406         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
407
408         /* Always leave this as zero. */
409         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
410         spin_unlock_irqrestore(&tp->indirect_lock, flags);
411 }
412
413 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
414 {
415         unsigned long flags;
416
417         spin_lock_irqsave(&tp->indirect_lock, flags);
418         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
419         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
420
421         /* Always leave this as zero. */
422         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
423         spin_unlock_irqrestore(&tp->indirect_lock, flags);
424 }
425
426 static void tg3_disable_ints(struct tg3 *tp)
427 {
428         tw32(TG3PCI_MISC_HOST_CTRL,
429              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
430         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
431         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
432 }
433
434 static inline void tg3_cond_int(struct tg3 *tp)
435 {
436         if (tp->hw_status->status & SD_STATUS_UPDATED)
437                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
438 }
439
440 static void tg3_enable_ints(struct tg3 *tp)
441 {
442         tw32(TG3PCI_MISC_HOST_CTRL,
443              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
444         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
445                      (tp->last_tag << 24));
446         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
447
448         tg3_cond_int(tp);
449 }
450
451 static inline unsigned int tg3_has_work(struct tg3 *tp)
452 {
453         struct tg3_hw_status *sblk = tp->hw_status;
454         unsigned int work_exists = 0;
455
456         /* check for phy events */
457         if (!(tp->tg3_flags &
458               (TG3_FLAG_USE_LINKCHG_REG |
459                TG3_FLAG_POLL_SERDES))) {
460                 if (sblk->status & SD_STATUS_LINK_CHG)
461                         work_exists = 1;
462         }
463         /* check for RX/TX work to do */
464         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
465             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
466                 work_exists = 1;
467
468         return work_exists;
469 }
470
471 /* tg3_restart_ints
472  *  similar to tg3_enable_ints, but it accurately determines whether there
473  *  is new work pending and can return without flushing the PIO write
474  *  which reenables interrupts 
475  */
476 static void tg3_restart_ints(struct tg3 *tp)
477 {
478         tw32(TG3PCI_MISC_HOST_CTRL,
479                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
480         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
481                      tp->last_tag << 24);
482         mmiowb();
483
484         /* When doing tagged status, this work check is unnecessary.
485          * The last_tag we write above tells the chip which piece of
486          * work we've completed.
487          */
488         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
489             tg3_has_work(tp))
490                 tw32(HOSTCC_MODE, tp->coalesce_mode |
491                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
492 }
493
494 static inline void tg3_netif_stop(struct tg3 *tp)
495 {
496         netif_poll_disable(tp->dev);
497         netif_tx_disable(tp->dev);
498 }
499
500 static inline void tg3_netif_start(struct tg3 *tp)
501 {
502         netif_wake_queue(tp->dev);
503         /* NOTE: unconditional netif_wake_queue is only appropriate
504          * so long as all callers are assured to have free tx slots
505          * (such as after tg3_init_hw)
506          */
507         netif_poll_enable(tp->dev);
508         tg3_cond_int(tp);
509 }
510
511 static void tg3_switch_clocks(struct tg3 *tp)
512 {
513         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
514         u32 orig_clock_ctrl;
515
516         orig_clock_ctrl = clock_ctrl;
517         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
518                        CLOCK_CTRL_CLKRUN_OENABLE |
519                        0x1f);
520         tp->pci_clock_ctrl = clock_ctrl;
521
522         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
523                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
524                         tw32_f(TG3PCI_CLOCK_CTRL,
525                                clock_ctrl | CLOCK_CTRL_625_CORE);
526                         udelay(40);
527                 }
528         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
529                 tw32_f(TG3PCI_CLOCK_CTRL,
530                      clock_ctrl |
531                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
532                 udelay(40);
533                 tw32_f(TG3PCI_CLOCK_CTRL,
534                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
535                 udelay(40);
536         }
537         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
538         udelay(40);
539 }
540
541 #define PHY_BUSY_LOOPS  5000
542
543 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
544 {
545         u32 frame_val;
546         unsigned int loops;
547         int ret;
548
549         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
550                 tw32_f(MAC_MI_MODE,
551                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
552                 udelay(80);
553         }
554
555         *val = 0x0;
556
557         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
558                       MI_COM_PHY_ADDR_MASK);
559         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
560                       MI_COM_REG_ADDR_MASK);
561         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
562         
563         tw32_f(MAC_MI_COM, frame_val);
564
565         loops = PHY_BUSY_LOOPS;
566         while (loops != 0) {
567                 udelay(10);
568                 frame_val = tr32(MAC_MI_COM);
569
570                 if ((frame_val & MI_COM_BUSY) == 0) {
571                         udelay(5);
572                         frame_val = tr32(MAC_MI_COM);
573                         break;
574                 }
575                 loops -= 1;
576         }
577
578         ret = -EBUSY;
579         if (loops != 0) {
580                 *val = frame_val & MI_COM_DATA_MASK;
581                 ret = 0;
582         }
583
584         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
585                 tw32_f(MAC_MI_MODE, tp->mi_mode);
586                 udelay(80);
587         }
588
589         return ret;
590 }
591
592 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
593 {
594         u32 frame_val;
595         unsigned int loops;
596         int ret;
597
598         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
599                 tw32_f(MAC_MI_MODE,
600                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
601                 udelay(80);
602         }
603
604         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
605                       MI_COM_PHY_ADDR_MASK);
606         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
607                       MI_COM_REG_ADDR_MASK);
608         frame_val |= (val & MI_COM_DATA_MASK);
609         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
610         
611         tw32_f(MAC_MI_COM, frame_val);
612
613         loops = PHY_BUSY_LOOPS;
614         while (loops != 0) {
615                 udelay(10);
616                 frame_val = tr32(MAC_MI_COM);
617                 if ((frame_val & MI_COM_BUSY) == 0) {
618                         udelay(5);
619                         frame_val = tr32(MAC_MI_COM);
620                         break;
621                 }
622                 loops -= 1;
623         }
624
625         ret = -EBUSY;
626         if (loops != 0)
627                 ret = 0;
628
629         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
630                 tw32_f(MAC_MI_MODE, tp->mi_mode);
631                 udelay(80);
632         }
633
634         return ret;
635 }
636
637 static void tg3_phy_set_wirespeed(struct tg3 *tp)
638 {
639         u32 val;
640
641         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
642                 return;
643
644         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
645             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
646                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
647                              (val | (1 << 15) | (1 << 4)));
648 }
649
650 static int tg3_bmcr_reset(struct tg3 *tp)
651 {
652         u32 phy_control;
653         int limit, err;
654
655         /* OK, reset it, and poll the BMCR_RESET bit until it
656          * clears or we time out.
657          */
658         phy_control = BMCR_RESET;
659         err = tg3_writephy(tp, MII_BMCR, phy_control);
660         if (err != 0)
661                 return -EBUSY;
662
663         limit = 5000;
664         while (limit--) {
665                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
666                 if (err != 0)
667                         return -EBUSY;
668
669                 if ((phy_control & BMCR_RESET) == 0) {
670                         udelay(40);
671                         break;
672                 }
673                 udelay(10);
674         }
675         if (limit <= 0)
676                 return -EBUSY;
677
678         return 0;
679 }
680
681 static int tg3_wait_macro_done(struct tg3 *tp)
682 {
683         int limit = 100;
684
685         while (limit--) {
686                 u32 tmp32;
687
688                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
689                         if ((tmp32 & 0x1000) == 0)
690                                 break;
691                 }
692         }
693         if (limit <= 0)
694                 return -EBUSY;
695
696         return 0;
697 }
698
699 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
700 {
701         static const u32 test_pat[4][6] = {
702         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
703         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
704         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
705         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
706         };
707         int chan;
708
709         for (chan = 0; chan < 4; chan++) {
710                 int i;
711
712                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
713                              (chan * 0x2000) | 0x0200);
714                 tg3_writephy(tp, 0x16, 0x0002);
715
716                 for (i = 0; i < 6; i++)
717                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
718                                      test_pat[chan][i]);
719
720                 tg3_writephy(tp, 0x16, 0x0202);
721                 if (tg3_wait_macro_done(tp)) {
722                         *resetp = 1;
723                         return -EBUSY;
724                 }
725
726                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
727                              (chan * 0x2000) | 0x0200);
728                 tg3_writephy(tp, 0x16, 0x0082);
729                 if (tg3_wait_macro_done(tp)) {
730                         *resetp = 1;
731                         return -EBUSY;
732                 }
733
734                 tg3_writephy(tp, 0x16, 0x0802);
735                 if (tg3_wait_macro_done(tp)) {
736                         *resetp = 1;
737                         return -EBUSY;
738                 }
739
740                 for (i = 0; i < 6; i += 2) {
741                         u32 low, high;
742
743                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
744                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
745                             tg3_wait_macro_done(tp)) {
746                                 *resetp = 1;
747                                 return -EBUSY;
748                         }
749                         low &= 0x7fff;
750                         high &= 0x000f;
751                         if (low != test_pat[chan][i] ||
752                             high != test_pat[chan][i+1]) {
753                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
754                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
755                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
756
757                                 return -EBUSY;
758                         }
759                 }
760         }
761
762         return 0;
763 }
764
765 static int tg3_phy_reset_chanpat(struct tg3 *tp)
766 {
767         int chan;
768
769         for (chan = 0; chan < 4; chan++) {
770                 int i;
771
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
773                              (chan * 0x2000) | 0x0200);
774                 tg3_writephy(tp, 0x16, 0x0002);
775                 for (i = 0; i < 6; i++)
776                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
777                 tg3_writephy(tp, 0x16, 0x0202);
778                 if (tg3_wait_macro_done(tp))
779                         return -EBUSY;
780         }
781
782         return 0;
783 }
784
785 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
786 {
787         u32 reg32, phy9_orig;
788         int retries, do_phy_reset, err;
789
790         retries = 10;
791         do_phy_reset = 1;
792         do {
793                 if (do_phy_reset) {
794                         err = tg3_bmcr_reset(tp);
795                         if (err)
796                                 return err;
797                         do_phy_reset = 0;
798                 }
799
800                 /* Disable transmitter and interrupt.  */
801                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
802                         continue;
803
804                 reg32 |= 0x3000;
805                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
806
807                 /* Set full-duplex, 1000 mbps.  */
808                 tg3_writephy(tp, MII_BMCR,
809                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
810
811                 /* Set to master mode.  */
812                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
813                         continue;
814
815                 tg3_writephy(tp, MII_TG3_CTRL,
816                              (MII_TG3_CTRL_AS_MASTER |
817                               MII_TG3_CTRL_ENABLE_AS_MASTER));
818
819                 /* Enable SM_DSP_CLOCK and 6dB.  */
820                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
821
822                 /* Block the PHY control access.  */
823                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
824                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
825
826                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
827                 if (!err)
828                         break;
829         } while (--retries);
830
831         err = tg3_phy_reset_chanpat(tp);
832         if (err)
833                 return err;
834
835         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
836         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
837
838         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
839         tg3_writephy(tp, 0x16, 0x0000);
840
841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
843                 /* Set Extended packet length bit for jumbo frames */
844                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
845         }
846         else {
847                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
848         }
849
850         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
851
852         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
853                 reg32 &= ~0x3000;
854                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
855         } else if (!err)
856                 err = -EBUSY;
857
858         return err;
859 }
860
861 /* This will reset the tigon3 PHY if there is no valid
862  * link unless the FORCE argument is non-zero.
863  */
864 static int tg3_phy_reset(struct tg3 *tp)
865 {
866         u32 phy_status;
867         int err;
868
869         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
870         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
871         if (err != 0)
872                 return -EBUSY;
873
874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
875             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
876             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
877                 err = tg3_phy_reset_5703_4_5(tp);
878                 if (err)
879                         return err;
880                 goto out;
881         }
882
883         err = tg3_bmcr_reset(tp);
884         if (err)
885                 return err;
886
887 out:
888         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
889                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
890                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
891                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
892                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
893                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
894                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
895         }
896         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
897                 tg3_writephy(tp, 0x1c, 0x8d68);
898                 tg3_writephy(tp, 0x1c, 0x8d68);
899         }
900         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
901                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
902                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
903                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
904                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
905                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
906                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
907                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
908                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
909         }
910         /* Set Extended packet length bit (bit 14) on all chips that */
911         /* support jumbo frames */
912         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
913                 /* Cannot do read-modify-write on 5401 */
914                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
915         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
916                 u32 phy_reg;
917
918                 /* Set bit 14 with read-modify-write to preserve other bits */
919                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
920                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
921                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
922         }
923
924         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
925          * jumbo frames transmission.
926          */
927         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
928                 u32 phy_reg;
929
930                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
931                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
932                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
933         }
934
935         tg3_phy_set_wirespeed(tp);
936         return 0;
937 }
938
939 static void tg3_frob_aux_power(struct tg3 *tp)
940 {
941         struct tg3 *tp_peer = tp;
942
943         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
944                 return;
945
946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
947                 tp_peer = pci_get_drvdata(tp->pdev_peer);
948                 if (!tp_peer)
949                         BUG();
950         }
951
952
953         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
954             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
955                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
956                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
957                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958                              (GRC_LCLCTRL_GPIO_OE0 |
959                               GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OE2 |
961                               GRC_LCLCTRL_GPIO_OUTPUT0 |
962                               GRC_LCLCTRL_GPIO_OUTPUT1));
963                         udelay(100);
964                 } else {
965                         u32 no_gpio2;
966                         u32 grc_local_ctrl;
967
968                         if (tp_peer != tp &&
969                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
970                                 return;
971
972                         /* On 5753 and variants, GPIO2 cannot be used. */
973                         no_gpio2 = tp->nic_sram_data_cfg &
974                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
975
976                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
977                                          GRC_LCLCTRL_GPIO_OE1 |
978                                          GRC_LCLCTRL_GPIO_OE2 |
979                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
980                                          GRC_LCLCTRL_GPIO_OUTPUT2;
981                         if (no_gpio2) {
982                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
983                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
984                         }
985                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
986                                                 grc_local_ctrl);
987                         udelay(100);
988
989                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
990
991                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
992                                                 grc_local_ctrl);
993                         udelay(100);
994
995                         if (!no_gpio2) {
996                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
997                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
998                                        grc_local_ctrl);
999                                 udelay(100);
1000                         }
1001                 }
1002         } else {
1003                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1004                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1005                         if (tp_peer != tp &&
1006                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1007                                 return;
1008
1009                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1010                              (GRC_LCLCTRL_GPIO_OE1 |
1011                               GRC_LCLCTRL_GPIO_OUTPUT1));
1012                         udelay(100);
1013
1014                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1015                              (GRC_LCLCTRL_GPIO_OE1));
1016                         udelay(100);
1017
1018                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1019                              (GRC_LCLCTRL_GPIO_OE1 |
1020                               GRC_LCLCTRL_GPIO_OUTPUT1));
1021                         udelay(100);
1022                 }
1023         }
1024 }
1025
1026 static int tg3_setup_phy(struct tg3 *, int);
1027
1028 #define RESET_KIND_SHUTDOWN     0
1029 #define RESET_KIND_INIT         1
1030 #define RESET_KIND_SUSPEND      2
1031
1032 static void tg3_write_sig_post_reset(struct tg3 *, int);
1033 static int tg3_halt_cpu(struct tg3 *, u32);
1034
1035 static int tg3_set_power_state(struct tg3 *tp, int state)
1036 {
1037         u32 misc_host_ctrl;
1038         u16 power_control, power_caps;
1039         int pm = tp->pm_cap;
1040
1041         /* Make sure register accesses (indirect or otherwise)
1042          * will function correctly.
1043          */
1044         pci_write_config_dword(tp->pdev,
1045                                TG3PCI_MISC_HOST_CTRL,
1046                                tp->misc_host_ctrl);
1047
1048         pci_read_config_word(tp->pdev,
1049                              pm + PCI_PM_CTRL,
1050                              &power_control);
1051         power_control |= PCI_PM_CTRL_PME_STATUS;
1052         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1053         switch (state) {
1054         case 0:
1055                 power_control |= 0;
1056                 pci_write_config_word(tp->pdev,
1057                                       pm + PCI_PM_CTRL,
1058                                       power_control);
1059                 udelay(100);    /* Delay after power state change */
1060
1061                 /* Switch out of Vaux if it is not a LOM */
1062                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1063                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1064                         udelay(100);
1065                 }
1066
1067                 return 0;
1068
1069         case 1:
1070                 power_control |= 1;
1071                 break;
1072
1073         case 2:
1074                 power_control |= 2;
1075                 break;
1076
1077         case 3:
1078                 power_control |= 3;
1079                 break;
1080
1081         default:
1082                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1083                        "requested.\n",
1084                        tp->dev->name, state);
1085                 return -EINVAL;
1086         };
1087
1088         power_control |= PCI_PM_CTRL_PME_ENABLE;
1089
1090         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1091         tw32(TG3PCI_MISC_HOST_CTRL,
1092              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1093
1094         if (tp->link_config.phy_is_low_power == 0) {
1095                 tp->link_config.phy_is_low_power = 1;
1096                 tp->link_config.orig_speed = tp->link_config.speed;
1097                 tp->link_config.orig_duplex = tp->link_config.duplex;
1098                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1099         }
1100
1101         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1102                 tp->link_config.speed = SPEED_10;
1103                 tp->link_config.duplex = DUPLEX_HALF;
1104                 tp->link_config.autoneg = AUTONEG_ENABLE;
1105                 tg3_setup_phy(tp, 0);
1106         }
1107
1108         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1109
1110         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1111                 u32 mac_mode;
1112
1113                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1114                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1115                         udelay(40);
1116
1117                         mac_mode = MAC_MODE_PORT_MODE_MII;
1118
1119                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1120                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1121                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1122                 } else {
1123                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1124                 }
1125
1126                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1127                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1128
1129                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1130                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1131                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1132
1133                 tw32_f(MAC_MODE, mac_mode);
1134                 udelay(100);
1135
1136                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1137                 udelay(10);
1138         }
1139
1140         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1141             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1142              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1143                 u32 base_val;
1144
1145                 base_val = tp->pci_clock_ctrl;
1146                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1147                              CLOCK_CTRL_TXCLK_DISABLE);
1148
1149                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1150                      CLOCK_CTRL_ALTCLK |
1151                      CLOCK_CTRL_PWRDOWN_PLL133);
1152                 udelay(40);
1153         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1154                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1155                 u32 newbits1, newbits2;
1156
1157                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1158                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1159                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1160                                     CLOCK_CTRL_TXCLK_DISABLE |
1161                                     CLOCK_CTRL_ALTCLK);
1162                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1163                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1164                         newbits1 = CLOCK_CTRL_625_CORE;
1165                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1166                 } else {
1167                         newbits1 = CLOCK_CTRL_ALTCLK;
1168                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1169                 }
1170
1171                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1172                 udelay(40);
1173
1174                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1175                 udelay(40);
1176
1177                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1178                         u32 newbits3;
1179
1180                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1181                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1182                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1183                                             CLOCK_CTRL_TXCLK_DISABLE |
1184                                             CLOCK_CTRL_44MHZ_CORE);
1185                         } else {
1186                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1187                         }
1188
1189                         tw32_f(TG3PCI_CLOCK_CTRL,
1190                                          tp->pci_clock_ctrl | newbits3);
1191                         udelay(40);
1192                 }
1193         }
1194
1195         tg3_frob_aux_power(tp);
1196
1197         /* Workaround for unstable PLL clock */
1198         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1199             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1200                 u32 val = tr32(0x7d00);
1201
1202                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1203                 tw32(0x7d00, val);
1204                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1205                         tg3_halt_cpu(tp, RX_CPU_BASE);
1206         }
1207
1208         /* Finally, set the new power state. */
1209         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1210         udelay(100);    /* Delay after power state change */
1211
1212         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1213
1214         return 0;
1215 }
1216
1217 static void tg3_link_report(struct tg3 *tp)
1218 {
1219         if (!netif_carrier_ok(tp->dev)) {
1220                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1221         } else {
1222                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1223                        tp->dev->name,
1224                        (tp->link_config.active_speed == SPEED_1000 ?
1225                         1000 :
1226                         (tp->link_config.active_speed == SPEED_100 ?
1227                          100 : 10)),
1228                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1229                         "full" : "half"));
1230
1231                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1232                        "%s for RX.\n",
1233                        tp->dev->name,
1234                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1235                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1236         }
1237 }
1238
1239 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1240 {
1241         u32 new_tg3_flags = 0;
1242         u32 old_rx_mode = tp->rx_mode;
1243         u32 old_tx_mode = tp->tx_mode;
1244
1245         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1246                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1247                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1248                                 if (remote_adv & LPA_PAUSE_CAP)
1249                                         new_tg3_flags |=
1250                                                 (TG3_FLAG_RX_PAUSE |
1251                                                 TG3_FLAG_TX_PAUSE);
1252                                 else if (remote_adv & LPA_PAUSE_ASYM)
1253                                         new_tg3_flags |=
1254                                                 (TG3_FLAG_RX_PAUSE);
1255                         } else {
1256                                 if (remote_adv & LPA_PAUSE_CAP)
1257                                         new_tg3_flags |=
1258                                                 (TG3_FLAG_RX_PAUSE |
1259                                                 TG3_FLAG_TX_PAUSE);
1260                         }
1261                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1262                         if ((remote_adv & LPA_PAUSE_CAP) &&
1263                         (remote_adv & LPA_PAUSE_ASYM))
1264                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1265                 }
1266
1267                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1268                 tp->tg3_flags |= new_tg3_flags;
1269         } else {
1270                 new_tg3_flags = tp->tg3_flags;
1271         }
1272
1273         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1274                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1275         else
1276                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1277
1278         if (old_rx_mode != tp->rx_mode) {
1279                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1280         }
1281         
1282         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1283                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1284         else
1285                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1286
1287         if (old_tx_mode != tp->tx_mode) {
1288                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1289         }
1290 }
1291
1292 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1293 {
1294         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1295         case MII_TG3_AUX_STAT_10HALF:
1296                 *speed = SPEED_10;
1297                 *duplex = DUPLEX_HALF;
1298                 break;
1299
1300         case MII_TG3_AUX_STAT_10FULL:
1301                 *speed = SPEED_10;
1302                 *duplex = DUPLEX_FULL;
1303                 break;
1304
1305         case MII_TG3_AUX_STAT_100HALF:
1306                 *speed = SPEED_100;
1307                 *duplex = DUPLEX_HALF;
1308                 break;
1309
1310         case MII_TG3_AUX_STAT_100FULL:
1311                 *speed = SPEED_100;
1312                 *duplex = DUPLEX_FULL;
1313                 break;
1314
1315         case MII_TG3_AUX_STAT_1000HALF:
1316                 *speed = SPEED_1000;
1317                 *duplex = DUPLEX_HALF;
1318                 break;
1319
1320         case MII_TG3_AUX_STAT_1000FULL:
1321                 *speed = SPEED_1000;
1322                 *duplex = DUPLEX_FULL;
1323                 break;
1324
1325         default:
1326                 *speed = SPEED_INVALID;
1327                 *duplex = DUPLEX_INVALID;
1328                 break;
1329         };
1330 }
1331
1332 static void tg3_phy_copper_begin(struct tg3 *tp)
1333 {
1334         u32 new_adv;
1335         int i;
1336
1337         if (tp->link_config.phy_is_low_power) {
1338                 /* Entering low power mode.  Disable gigabit and
1339                  * 100baseT advertisements.
1340                  */
1341                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1342
1343                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1344                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1345                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1346                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1347
1348                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1349         } else if (tp->link_config.speed == SPEED_INVALID) {
1350                 tp->link_config.advertising =
1351                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1352                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1353                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1354                          ADVERTISED_Autoneg | ADVERTISED_MII);
1355
1356                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1357                         tp->link_config.advertising &=
1358                                 ~(ADVERTISED_1000baseT_Half |
1359                                   ADVERTISED_1000baseT_Full);
1360
1361                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1362                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1363                         new_adv |= ADVERTISE_10HALF;
1364                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1365                         new_adv |= ADVERTISE_10FULL;
1366                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1367                         new_adv |= ADVERTISE_100HALF;
1368                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1369                         new_adv |= ADVERTISE_100FULL;
1370                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1371
1372                 if (tp->link_config.advertising &
1373                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1374                         new_adv = 0;
1375                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1376                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1377                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1378                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1379                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1380                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1381                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1382                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1383                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1384                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1385                 } else {
1386                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1387                 }
1388         } else {
1389                 /* Asking for a specific link mode. */
1390                 if (tp->link_config.speed == SPEED_1000) {
1391                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1392                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1393
1394                         if (tp->link_config.duplex == DUPLEX_FULL)
1395                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1396                         else
1397                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1398                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1399                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1400                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1401                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1402                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1403                 } else {
1404                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1405
1406                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1407                         if (tp->link_config.speed == SPEED_100) {
1408                                 if (tp->link_config.duplex == DUPLEX_FULL)
1409                                         new_adv |= ADVERTISE_100FULL;
1410                                 else
1411                                         new_adv |= ADVERTISE_100HALF;
1412                         } else {
1413                                 if (tp->link_config.duplex == DUPLEX_FULL)
1414                                         new_adv |= ADVERTISE_10FULL;
1415                                 else
1416                                         new_adv |= ADVERTISE_10HALF;
1417                         }
1418                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1419                 }
1420         }
1421
1422         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1423             tp->link_config.speed != SPEED_INVALID) {
1424                 u32 bmcr, orig_bmcr;
1425
1426                 tp->link_config.active_speed = tp->link_config.speed;
1427                 tp->link_config.active_duplex = tp->link_config.duplex;
1428
1429                 bmcr = 0;
1430                 switch (tp->link_config.speed) {
1431                 default:
1432                 case SPEED_10:
1433                         break;
1434
1435                 case SPEED_100:
1436                         bmcr |= BMCR_SPEED100;
1437                         break;
1438
1439                 case SPEED_1000:
1440                         bmcr |= TG3_BMCR_SPEED1000;
1441                         break;
1442                 };
1443
1444                 if (tp->link_config.duplex == DUPLEX_FULL)
1445                         bmcr |= BMCR_FULLDPLX;
1446
1447                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1448                     (bmcr != orig_bmcr)) {
1449                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1450                         for (i = 0; i < 1500; i++) {
1451                                 u32 tmp;
1452
1453                                 udelay(10);
1454                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1455                                     tg3_readphy(tp, MII_BMSR, &tmp))
1456                                         continue;
1457                                 if (!(tmp & BMSR_LSTATUS)) {
1458                                         udelay(40);
1459                                         break;
1460                                 }
1461                         }
1462                         tg3_writephy(tp, MII_BMCR, bmcr);
1463                         udelay(40);
1464                 }
1465         } else {
1466                 tg3_writephy(tp, MII_BMCR,
1467                              BMCR_ANENABLE | BMCR_ANRESTART);
1468         }
1469 }
1470
1471 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1472 {
1473         int err;
1474
1475         /* Turn off tap power management. */
1476         /* Set Extended packet length bit */
1477         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1478
1479         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1480         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1481
1482         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1483         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1484
1485         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1486         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1487
1488         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1489         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1490
1491         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1492         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1493
1494         udelay(40);
1495
1496         return err;
1497 }
1498
1499 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1500 {
1501         u32 adv_reg, all_mask;
1502
1503         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1504                 return 0;
1505
1506         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1507                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1508         if ((adv_reg & all_mask) != all_mask)
1509                 return 0;
1510         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1511                 u32 tg3_ctrl;
1512
1513                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1514                         return 0;
1515
1516                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1517                             MII_TG3_CTRL_ADV_1000_FULL);
1518                 if ((tg3_ctrl & all_mask) != all_mask)
1519                         return 0;
1520         }
1521         return 1;
1522 }
1523
1524 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1525 {
1526         int current_link_up;
1527         u32 bmsr, dummy;
1528         u16 current_speed;
1529         u8 current_duplex;
1530         int i, err;
1531
1532         tw32(MAC_EVENT, 0);
1533
1534         tw32_f(MAC_STATUS,
1535              (MAC_STATUS_SYNC_CHANGED |
1536               MAC_STATUS_CFG_CHANGED |
1537               MAC_STATUS_MI_COMPLETION |
1538               MAC_STATUS_LNKSTATE_CHANGED));
1539         udelay(40);
1540
1541         tp->mi_mode = MAC_MI_MODE_BASE;
1542         tw32_f(MAC_MI_MODE, tp->mi_mode);
1543         udelay(80);
1544
1545         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1546
1547         /* Some third-party PHYs need to be reset on link going
1548          * down.
1549          */
1550         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1551              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1552              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1553             netif_carrier_ok(tp->dev)) {
1554                 tg3_readphy(tp, MII_BMSR, &bmsr);
1555                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1556                     !(bmsr & BMSR_LSTATUS))
1557                         force_reset = 1;
1558         }
1559         if (force_reset)
1560                 tg3_phy_reset(tp);
1561
1562         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1563                 tg3_readphy(tp, MII_BMSR, &bmsr);
1564                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1565                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1566                         bmsr = 0;
1567
1568                 if (!(bmsr & BMSR_LSTATUS)) {
1569                         err = tg3_init_5401phy_dsp(tp);
1570                         if (err)
1571                                 return err;
1572
1573                         tg3_readphy(tp, MII_BMSR, &bmsr);
1574                         for (i = 0; i < 1000; i++) {
1575                                 udelay(10);
1576                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1577                                     (bmsr & BMSR_LSTATUS)) {
1578                                         udelay(40);
1579                                         break;
1580                                 }
1581                         }
1582
1583                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1584                             !(bmsr & BMSR_LSTATUS) &&
1585                             tp->link_config.active_speed == SPEED_1000) {
1586                                 err = tg3_phy_reset(tp);
1587                                 if (!err)
1588                                         err = tg3_init_5401phy_dsp(tp);
1589                                 if (err)
1590                                         return err;
1591                         }
1592                 }
1593         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1594                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1595                 /* 5701 {A0,B0} CRC bug workaround */
1596                 tg3_writephy(tp, 0x15, 0x0a75);
1597                 tg3_writephy(tp, 0x1c, 0x8c68);
1598                 tg3_writephy(tp, 0x1c, 0x8d68);
1599                 tg3_writephy(tp, 0x1c, 0x8c68);
1600         }
1601
1602         /* Clear pending interrupts... */
1603         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1604         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1605
1606         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1607                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1608         else
1609                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1610
1611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1612             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1613                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1614                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1615                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1616                 else
1617                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1618         }
1619
1620         current_link_up = 0;
1621         current_speed = SPEED_INVALID;
1622         current_duplex = DUPLEX_INVALID;
1623
1624         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1625                 u32 val;
1626
1627                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1628                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1629                 if (!(val & (1 << 10))) {
1630                         val |= (1 << 10);
1631                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1632                         goto relink;
1633                 }
1634         }
1635
1636         bmsr = 0;
1637         for (i = 0; i < 100; i++) {
1638                 tg3_readphy(tp, MII_BMSR, &bmsr);
1639                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1640                     (bmsr & BMSR_LSTATUS))
1641                         break;
1642                 udelay(40);
1643         }
1644
1645         if (bmsr & BMSR_LSTATUS) {
1646                 u32 aux_stat, bmcr;
1647
1648                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1649                 for (i = 0; i < 2000; i++) {
1650                         udelay(10);
1651                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1652                             aux_stat)
1653                                 break;
1654                 }
1655
1656                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1657                                              &current_speed,
1658                                              &current_duplex);
1659
1660                 bmcr = 0;
1661                 for (i = 0; i < 200; i++) {
1662                         tg3_readphy(tp, MII_BMCR, &bmcr);
1663                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1664                                 continue;
1665                         if (bmcr && bmcr != 0x7fff)
1666                                 break;
1667                         udelay(10);
1668                 }
1669
1670                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1671                         if (bmcr & BMCR_ANENABLE) {
1672                                 current_link_up = 1;
1673
1674                                 /* Force autoneg restart if we are exiting
1675                                  * low power mode.
1676                                  */
1677                                 if (!tg3_copper_is_advertising_all(tp))
1678                                         current_link_up = 0;
1679                         } else {
1680                                 current_link_up = 0;
1681                         }
1682                 } else {
1683                         if (!(bmcr & BMCR_ANENABLE) &&
1684                             tp->link_config.speed == current_speed &&
1685                             tp->link_config.duplex == current_duplex) {
1686                                 current_link_up = 1;
1687                         } else {
1688                                 current_link_up = 0;
1689                         }
1690                 }
1691
1692                 tp->link_config.active_speed = current_speed;
1693                 tp->link_config.active_duplex = current_duplex;
1694         }
1695
1696         if (current_link_up == 1 &&
1697             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1698             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1699                 u32 local_adv, remote_adv;
1700
1701                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1702                         local_adv = 0;
1703                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1704
1705                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1706                         remote_adv = 0;
1707
1708                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1709
1710                 /* If we are not advertising full pause capability,
1711                  * something is wrong.  Bring the link down and reconfigure.
1712                  */
1713                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1714                         current_link_up = 0;
1715                 } else {
1716                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1717                 }
1718         }
1719 relink:
1720         if (current_link_up == 0) {
1721                 u32 tmp;
1722
1723                 tg3_phy_copper_begin(tp);
1724
1725                 tg3_readphy(tp, MII_BMSR, &tmp);
1726                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1727                     (tmp & BMSR_LSTATUS))
1728                         current_link_up = 1;
1729         }
1730
1731         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1732         if (current_link_up == 1) {
1733                 if (tp->link_config.active_speed == SPEED_100 ||
1734                     tp->link_config.active_speed == SPEED_10)
1735                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1736                 else
1737                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1738         } else
1739                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1740
1741         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1742         if (tp->link_config.active_duplex == DUPLEX_HALF)
1743                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1744
1745         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1746         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1747                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1748                     (current_link_up == 1 &&
1749                      tp->link_config.active_speed == SPEED_10))
1750                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1751         } else {
1752                 if (current_link_up == 1)
1753                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1754         }
1755
1756         /* ??? Without this setting Netgear GA302T PHY does not
1757          * ??? send/receive packets...
1758          */
1759         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1760             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1761                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1762                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1763                 udelay(80);
1764         }
1765
1766         tw32_f(MAC_MODE, tp->mac_mode);
1767         udelay(40);
1768
1769         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1770                 /* Polled via timer. */
1771                 tw32_f(MAC_EVENT, 0);
1772         } else {
1773                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1774         }
1775         udelay(40);
1776
1777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1778             current_link_up == 1 &&
1779             tp->link_config.active_speed == SPEED_1000 &&
1780             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1781              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1782                 udelay(120);
1783                 tw32_f(MAC_STATUS,
1784                      (MAC_STATUS_SYNC_CHANGED |
1785                       MAC_STATUS_CFG_CHANGED));
1786                 udelay(40);
1787                 tg3_write_mem(tp,
1788                               NIC_SRAM_FIRMWARE_MBOX,
1789                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1790         }
1791
1792         if (current_link_up != netif_carrier_ok(tp->dev)) {
1793                 if (current_link_up)
1794                         netif_carrier_on(tp->dev);
1795                 else
1796                         netif_carrier_off(tp->dev);
1797                 tg3_link_report(tp);
1798         }
1799
1800         return 0;
1801 }
1802
1803 struct tg3_fiber_aneginfo {
1804         int state;
1805 #define ANEG_STATE_UNKNOWN              0
1806 #define ANEG_STATE_AN_ENABLE            1
1807 #define ANEG_STATE_RESTART_INIT         2
1808 #define ANEG_STATE_RESTART              3
1809 #define ANEG_STATE_DISABLE_LINK_OK      4
1810 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1811 #define ANEG_STATE_ABILITY_DETECT       6
1812 #define ANEG_STATE_ACK_DETECT_INIT      7
1813 #define ANEG_STATE_ACK_DETECT           8
1814 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1815 #define ANEG_STATE_COMPLETE_ACK         10
1816 #define ANEG_STATE_IDLE_DETECT_INIT     11
1817 #define ANEG_STATE_IDLE_DETECT          12
1818 #define ANEG_STATE_LINK_OK              13
1819 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1820 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1821
1822         u32 flags;
1823 #define MR_AN_ENABLE            0x00000001
1824 #define MR_RESTART_AN           0x00000002
1825 #define MR_AN_COMPLETE          0x00000004
1826 #define MR_PAGE_RX              0x00000008
1827 #define MR_NP_LOADED            0x00000010
1828 #define MR_TOGGLE_TX            0x00000020
1829 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1830 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1831 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1832 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1833 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1834 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1835 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1836 #define MR_TOGGLE_RX            0x00002000
1837 #define MR_NP_RX                0x00004000
1838
1839 #define MR_LINK_OK              0x80000000
1840
1841         unsigned long link_time, cur_time;
1842
1843         u32 ability_match_cfg;
1844         int ability_match_count;
1845
1846         char ability_match, idle_match, ack_match;
1847
1848         u32 txconfig, rxconfig;
1849 #define ANEG_CFG_NP             0x00000080
1850 #define ANEG_CFG_ACK            0x00000040
1851 #define ANEG_CFG_RF2            0x00000020
1852 #define ANEG_CFG_RF1            0x00000010
1853 #define ANEG_CFG_PS2            0x00000001
1854 #define ANEG_CFG_PS1            0x00008000
1855 #define ANEG_CFG_HD             0x00004000
1856 #define ANEG_CFG_FD             0x00002000
1857 #define ANEG_CFG_INVAL          0x00001f06
1858
1859 };
1860 #define ANEG_OK         0
1861 #define ANEG_DONE       1
1862 #define ANEG_TIMER_ENAB 2
1863 #define ANEG_FAILED     -1
1864
1865 #define ANEG_STATE_SETTLE_TIME  10000
1866
1867 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1868                                    struct tg3_fiber_aneginfo *ap)
1869 {
1870         unsigned long delta;
1871         u32 rx_cfg_reg;
1872         int ret;
1873
1874         if (ap->state == ANEG_STATE_UNKNOWN) {
1875                 ap->rxconfig = 0;
1876                 ap->link_time = 0;
1877                 ap->cur_time = 0;
1878                 ap->ability_match_cfg = 0;
1879                 ap->ability_match_count = 0;
1880                 ap->ability_match = 0;
1881                 ap->idle_match = 0;
1882                 ap->ack_match = 0;
1883         }
1884         ap->cur_time++;
1885
1886         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1887                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1888
1889                 if (rx_cfg_reg != ap->ability_match_cfg) {
1890                         ap->ability_match_cfg = rx_cfg_reg;
1891                         ap->ability_match = 0;
1892                         ap->ability_match_count = 0;
1893                 } else {
1894                         if (++ap->ability_match_count > 1) {
1895                                 ap->ability_match = 1;
1896                                 ap->ability_match_cfg = rx_cfg_reg;
1897                         }
1898                 }
1899                 if (rx_cfg_reg & ANEG_CFG_ACK)
1900                         ap->ack_match = 1;
1901                 else
1902                         ap->ack_match = 0;
1903
1904                 ap->idle_match = 0;
1905         } else {
1906                 ap->idle_match = 1;
1907                 ap->ability_match_cfg = 0;
1908                 ap->ability_match_count = 0;
1909                 ap->ability_match = 0;
1910                 ap->ack_match = 0;
1911
1912                 rx_cfg_reg = 0;
1913         }
1914
1915         ap->rxconfig = rx_cfg_reg;
1916         ret = ANEG_OK;
1917
1918         switch(ap->state) {
1919         case ANEG_STATE_UNKNOWN:
1920                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1921                         ap->state = ANEG_STATE_AN_ENABLE;
1922
1923                 /* fallthru */
1924         case ANEG_STATE_AN_ENABLE:
1925                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1926                 if (ap->flags & MR_AN_ENABLE) {
1927                         ap->link_time = 0;
1928                         ap->cur_time = 0;
1929                         ap->ability_match_cfg = 0;
1930                         ap->ability_match_count = 0;
1931                         ap->ability_match = 0;
1932                         ap->idle_match = 0;
1933                         ap->ack_match = 0;
1934
1935                         ap->state = ANEG_STATE_RESTART_INIT;
1936                 } else {
1937                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1938                 }
1939                 break;
1940
1941         case ANEG_STATE_RESTART_INIT:
1942                 ap->link_time = ap->cur_time;
1943                 ap->flags &= ~(MR_NP_LOADED);
1944                 ap->txconfig = 0;
1945                 tw32(MAC_TX_AUTO_NEG, 0);
1946                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1947                 tw32_f(MAC_MODE, tp->mac_mode);
1948                 udelay(40);
1949
1950                 ret = ANEG_TIMER_ENAB;
1951                 ap->state = ANEG_STATE_RESTART;
1952
1953                 /* fallthru */
1954         case ANEG_STATE_RESTART:
1955                 delta = ap->cur_time - ap->link_time;
1956                 if (delta > ANEG_STATE_SETTLE_TIME) {
1957                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1958                 } else {
1959                         ret = ANEG_TIMER_ENAB;
1960                 }
1961                 break;
1962
1963         case ANEG_STATE_DISABLE_LINK_OK:
1964                 ret = ANEG_DONE;
1965                 break;
1966
1967         case ANEG_STATE_ABILITY_DETECT_INIT:
1968                 ap->flags &= ~(MR_TOGGLE_TX);
1969                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1970                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1971                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1972                 tw32_f(MAC_MODE, tp->mac_mode);
1973                 udelay(40);
1974
1975                 ap->state = ANEG_STATE_ABILITY_DETECT;
1976                 break;
1977
1978         case ANEG_STATE_ABILITY_DETECT:
1979                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1980                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1981                 }
1982                 break;
1983
1984         case ANEG_STATE_ACK_DETECT_INIT:
1985                 ap->txconfig |= ANEG_CFG_ACK;
1986                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1987                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1988                 tw32_f(MAC_MODE, tp->mac_mode);
1989                 udelay(40);
1990
1991                 ap->state = ANEG_STATE_ACK_DETECT;
1992
1993                 /* fallthru */
1994         case ANEG_STATE_ACK_DETECT:
1995                 if (ap->ack_match != 0) {
1996                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1997                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1998                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1999                         } else {
2000                                 ap->state = ANEG_STATE_AN_ENABLE;
2001                         }
2002                 } else if (ap->ability_match != 0 &&
2003                            ap->rxconfig == 0) {
2004                         ap->state = ANEG_STATE_AN_ENABLE;
2005                 }
2006                 break;
2007
2008         case ANEG_STATE_COMPLETE_ACK_INIT:
2009                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2010                         ret = ANEG_FAILED;
2011                         break;
2012                 }
2013                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2014                                MR_LP_ADV_HALF_DUPLEX |
2015                                MR_LP_ADV_SYM_PAUSE |
2016                                MR_LP_ADV_ASYM_PAUSE |
2017                                MR_LP_ADV_REMOTE_FAULT1 |
2018                                MR_LP_ADV_REMOTE_FAULT2 |
2019                                MR_LP_ADV_NEXT_PAGE |
2020                                MR_TOGGLE_RX |
2021                                MR_NP_RX);
2022                 if (ap->rxconfig & ANEG_CFG_FD)
2023                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2024                 if (ap->rxconfig & ANEG_CFG_HD)
2025                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2026                 if (ap->rxconfig & ANEG_CFG_PS1)
2027                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2028                 if (ap->rxconfig & ANEG_CFG_PS2)
2029                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2030                 if (ap->rxconfig & ANEG_CFG_RF1)
2031                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2032                 if (ap->rxconfig & ANEG_CFG_RF2)
2033                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2034                 if (ap->rxconfig & ANEG_CFG_NP)
2035                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2036
2037                 ap->link_time = ap->cur_time;
2038
2039                 ap->flags ^= (MR_TOGGLE_TX);
2040                 if (ap->rxconfig & 0x0008)
2041                         ap->flags |= MR_TOGGLE_RX;
2042                 if (ap->rxconfig & ANEG_CFG_NP)
2043                         ap->flags |= MR_NP_RX;
2044                 ap->flags |= MR_PAGE_RX;
2045
2046                 ap->state = ANEG_STATE_COMPLETE_ACK;
2047                 ret = ANEG_TIMER_ENAB;
2048                 break;
2049
2050         case ANEG_STATE_COMPLETE_ACK:
2051                 if (ap->ability_match != 0 &&
2052                     ap->rxconfig == 0) {
2053                         ap->state = ANEG_STATE_AN_ENABLE;
2054                         break;
2055                 }
2056                 delta = ap->cur_time - ap->link_time;
2057                 if (delta > ANEG_STATE_SETTLE_TIME) {
2058                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2059                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2060                         } else {
2061                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2062                                     !(ap->flags & MR_NP_RX)) {
2063                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2064                                 } else {
2065                                         ret = ANEG_FAILED;
2066                                 }
2067                         }
2068                 }
2069                 break;
2070
2071         case ANEG_STATE_IDLE_DETECT_INIT:
2072                 ap->link_time = ap->cur_time;
2073                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2074                 tw32_f(MAC_MODE, tp->mac_mode);
2075                 udelay(40);
2076
2077                 ap->state = ANEG_STATE_IDLE_DETECT;
2078                 ret = ANEG_TIMER_ENAB;
2079                 break;
2080
2081         case ANEG_STATE_IDLE_DETECT:
2082                 if (ap->ability_match != 0 &&
2083                     ap->rxconfig == 0) {
2084                         ap->state = ANEG_STATE_AN_ENABLE;
2085                         break;
2086                 }
2087                 delta = ap->cur_time - ap->link_time;
2088                 if (delta > ANEG_STATE_SETTLE_TIME) {
2089                         /* XXX another gem from the Broadcom driver :( */
2090                         ap->state = ANEG_STATE_LINK_OK;
2091                 }
2092                 break;
2093
2094         case ANEG_STATE_LINK_OK:
2095                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2096                 ret = ANEG_DONE;
2097                 break;
2098
2099         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2100                 /* ??? unimplemented */
2101                 break;
2102
2103         case ANEG_STATE_NEXT_PAGE_WAIT:
2104                 /* ??? unimplemented */
2105                 break;
2106
2107         default:
2108                 ret = ANEG_FAILED;
2109                 break;
2110         };
2111
2112         return ret;
2113 }
2114
2115 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2116 {
2117         int res = 0;
2118         struct tg3_fiber_aneginfo aninfo;
2119         int status = ANEG_FAILED;
2120         unsigned int tick;
2121         u32 tmp;
2122
2123         tw32_f(MAC_TX_AUTO_NEG, 0);
2124
2125         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2126         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2127         udelay(40);
2128
2129         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2130         udelay(40);
2131
2132         memset(&aninfo, 0, sizeof(aninfo));
2133         aninfo.flags |= MR_AN_ENABLE;
2134         aninfo.state = ANEG_STATE_UNKNOWN;
2135         aninfo.cur_time = 0;
2136         tick = 0;
2137         while (++tick < 195000) {
2138                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2139                 if (status == ANEG_DONE || status == ANEG_FAILED)
2140                         break;
2141
2142                 udelay(1);
2143         }
2144
2145         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2146         tw32_f(MAC_MODE, tp->mac_mode);
2147         udelay(40);
2148
2149         *flags = aninfo.flags;
2150
2151         if (status == ANEG_DONE &&
2152             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2153                              MR_LP_ADV_FULL_DUPLEX)))
2154                 res = 1;
2155
2156         return res;
2157 }
2158
2159 static void tg3_init_bcm8002(struct tg3 *tp)
2160 {
2161         u32 mac_status = tr32(MAC_STATUS);
2162         int i;
2163
2164         /* Reset when initting first time or we have a link. */
2165         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2166             !(mac_status & MAC_STATUS_PCS_SYNCED))
2167                 return;
2168
2169         /* Set PLL lock range. */
2170         tg3_writephy(tp, 0x16, 0x8007);
2171
2172         /* SW reset */
2173         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2174
2175         /* Wait for reset to complete. */
2176         /* XXX schedule_timeout() ... */
2177         for (i = 0; i < 500; i++)
2178                 udelay(10);
2179
2180         /* Config mode; select PMA/Ch 1 regs. */
2181         tg3_writephy(tp, 0x10, 0x8411);
2182
2183         /* Enable auto-lock and comdet, select txclk for tx. */
2184         tg3_writephy(tp, 0x11, 0x0a10);
2185
2186         tg3_writephy(tp, 0x18, 0x00a0);
2187         tg3_writephy(tp, 0x16, 0x41ff);
2188
2189         /* Assert and deassert POR. */
2190         tg3_writephy(tp, 0x13, 0x0400);
2191         udelay(40);
2192         tg3_writephy(tp, 0x13, 0x0000);
2193
2194         tg3_writephy(tp, 0x11, 0x0a50);
2195         udelay(40);
2196         tg3_writephy(tp, 0x11, 0x0a10);
2197
2198         /* Wait for signal to stabilize */
2199         /* XXX schedule_timeout() ... */
2200         for (i = 0; i < 15000; i++)
2201                 udelay(10);
2202
2203         /* Deselect the channel register so we can read the PHYID
2204          * later.
2205          */
2206         tg3_writephy(tp, 0x10, 0x8011);
2207 }
2208
2209 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2210 {
2211         u32 sg_dig_ctrl, sg_dig_status;
2212         u32 serdes_cfg, expected_sg_dig_ctrl;
2213         int workaround, port_a;
2214         int current_link_up;
2215
2216         serdes_cfg = 0;
2217         expected_sg_dig_ctrl = 0;
2218         workaround = 0;
2219         port_a = 1;
2220         current_link_up = 0;
2221
2222         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2223             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2224                 workaround = 1;
2225                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2226                         port_a = 0;
2227
2228                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2229                 /* preserve bits 20-23 for voltage regulator */
2230                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2231         }
2232
2233         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2234
2235         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2236                 if (sg_dig_ctrl & (1 << 31)) {
2237                         if (workaround) {
2238                                 u32 val = serdes_cfg;
2239
2240                                 if (port_a)
2241                                         val |= 0xc010000;
2242                                 else
2243                                         val |= 0x4010000;
2244                                 tw32_f(MAC_SERDES_CFG, val);
2245                         }
2246                         tw32_f(SG_DIG_CTRL, 0x01388400);
2247                 }
2248                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2249                         tg3_setup_flow_control(tp, 0, 0);
2250                         current_link_up = 1;
2251                 }
2252                 goto out;
2253         }
2254
2255         /* Want auto-negotiation.  */
2256         expected_sg_dig_ctrl = 0x81388400;
2257
2258         /* Pause capability */
2259         expected_sg_dig_ctrl |= (1 << 11);
2260
2261         /* Asymettric pause */
2262         expected_sg_dig_ctrl |= (1 << 12);
2263
2264         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2265                 if (workaround)
2266                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2267                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2268                 udelay(5);
2269                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2270
2271                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2272         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2273                                  MAC_STATUS_SIGNAL_DET)) {
2274                 int i;
2275
2276                 /* Giver time to negotiate (~200ms) */
2277                 for (i = 0; i < 40000; i++) {
2278                         sg_dig_status = tr32(SG_DIG_STATUS);
2279                         if (sg_dig_status & (0x3))
2280                                 break;
2281                         udelay(5);
2282                 }
2283                 mac_status = tr32(MAC_STATUS);
2284
2285                 if ((sg_dig_status & (1 << 1)) &&
2286                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2287                         u32 local_adv, remote_adv;
2288
2289                         local_adv = ADVERTISE_PAUSE_CAP;
2290                         remote_adv = 0;
2291                         if (sg_dig_status & (1 << 19))
2292                                 remote_adv |= LPA_PAUSE_CAP;
2293                         if (sg_dig_status & (1 << 20))
2294                                 remote_adv |= LPA_PAUSE_ASYM;
2295
2296                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2297                         current_link_up = 1;
2298                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2299                 } else if (!(sg_dig_status & (1 << 1))) {
2300                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2301                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2302                         else {
2303                                 if (workaround) {
2304                                         u32 val = serdes_cfg;
2305
2306                                         if (port_a)
2307                                                 val |= 0xc010000;
2308                                         else
2309                                                 val |= 0x4010000;
2310
2311                                         tw32_f(MAC_SERDES_CFG, val);
2312                                 }
2313
2314                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2315                                 udelay(40);
2316
2317                                 /* Link parallel detection - link is up */
2318                                 /* only if we have PCS_SYNC and not */
2319                                 /* receiving config code words */
2320                                 mac_status = tr32(MAC_STATUS);
2321                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2322                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2323                                         tg3_setup_flow_control(tp, 0, 0);
2324                                         current_link_up = 1;
2325                                 }
2326                         }
2327                 }
2328         }
2329
2330 out:
2331         return current_link_up;
2332 }
2333
2334 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2335 {
2336         int current_link_up = 0;
2337
2338         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2339                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2340                 goto out;
2341         }
2342
2343         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2344                 u32 flags;
2345                 int i;
2346   
2347                 if (fiber_autoneg(tp, &flags)) {
2348                         u32 local_adv, remote_adv;
2349
2350                         local_adv = ADVERTISE_PAUSE_CAP;
2351                         remote_adv = 0;
2352                         if (flags & MR_LP_ADV_SYM_PAUSE)
2353                                 remote_adv |= LPA_PAUSE_CAP;
2354                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2355                                 remote_adv |= LPA_PAUSE_ASYM;
2356
2357                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2358
2359                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2360                         current_link_up = 1;
2361                 }
2362                 for (i = 0; i < 30; i++) {
2363                         udelay(20);
2364                         tw32_f(MAC_STATUS,
2365                                (MAC_STATUS_SYNC_CHANGED |
2366                                 MAC_STATUS_CFG_CHANGED));
2367                         udelay(40);
2368                         if ((tr32(MAC_STATUS) &
2369                              (MAC_STATUS_SYNC_CHANGED |
2370                               MAC_STATUS_CFG_CHANGED)) == 0)
2371                                 break;
2372                 }
2373
2374                 mac_status = tr32(MAC_STATUS);
2375                 if (current_link_up == 0 &&
2376                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2377                     !(mac_status & MAC_STATUS_RCVD_CFG))
2378                         current_link_up = 1;
2379         } else {
2380                 /* Forcing 1000FD link up. */
2381                 current_link_up = 1;
2382                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2383
2384                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2385                 udelay(40);
2386         }
2387
2388 out:
2389         return current_link_up;
2390 }
2391
2392 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2393 {
2394         u32 orig_pause_cfg;
2395         u16 orig_active_speed;
2396         u8 orig_active_duplex;
2397         u32 mac_status;
2398         int current_link_up;
2399         int i;
2400
2401         orig_pause_cfg =
2402                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2403                                   TG3_FLAG_TX_PAUSE));
2404         orig_active_speed = tp->link_config.active_speed;
2405         orig_active_duplex = tp->link_config.active_duplex;
2406
2407         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2408             netif_carrier_ok(tp->dev) &&
2409             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2410                 mac_status = tr32(MAC_STATUS);
2411                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2412                                MAC_STATUS_SIGNAL_DET |
2413                                MAC_STATUS_CFG_CHANGED |
2414                                MAC_STATUS_RCVD_CFG);
2415                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2416                                    MAC_STATUS_SIGNAL_DET)) {
2417                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2418                                             MAC_STATUS_CFG_CHANGED));
2419                         return 0;
2420                 }
2421         }
2422
2423         tw32_f(MAC_TX_AUTO_NEG, 0);
2424
2425         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2426         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2427         tw32_f(MAC_MODE, tp->mac_mode);
2428         udelay(40);
2429
2430         if (tp->phy_id == PHY_ID_BCM8002)
2431                 tg3_init_bcm8002(tp);
2432
2433         /* Enable link change event even when serdes polling.  */
2434         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2435         udelay(40);
2436
2437         current_link_up = 0;
2438         mac_status = tr32(MAC_STATUS);
2439
2440         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2441                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2442         else
2443                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2444
2445         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2446         tw32_f(MAC_MODE, tp->mac_mode);
2447         udelay(40);
2448
2449         tp->hw_status->status =
2450                 (SD_STATUS_UPDATED |
2451                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2452
2453         for (i = 0; i < 100; i++) {
2454                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2455                                     MAC_STATUS_CFG_CHANGED));
2456                 udelay(5);
2457                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2458                                          MAC_STATUS_CFG_CHANGED)) == 0)
2459                         break;
2460         }
2461
2462         mac_status = tr32(MAC_STATUS);
2463         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2464                 current_link_up = 0;
2465                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2466                         tw32_f(MAC_MODE, (tp->mac_mode |
2467                                           MAC_MODE_SEND_CONFIGS));
2468                         udelay(1);
2469                         tw32_f(MAC_MODE, tp->mac_mode);
2470                 }
2471         }
2472
2473         if (current_link_up == 1) {
2474                 tp->link_config.active_speed = SPEED_1000;
2475                 tp->link_config.active_duplex = DUPLEX_FULL;
2476                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2477                                     LED_CTRL_LNKLED_OVERRIDE |
2478                                     LED_CTRL_1000MBPS_ON));
2479         } else {
2480                 tp->link_config.active_speed = SPEED_INVALID;
2481                 tp->link_config.active_duplex = DUPLEX_INVALID;
2482                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2483                                     LED_CTRL_LNKLED_OVERRIDE |
2484                                     LED_CTRL_TRAFFIC_OVERRIDE));
2485         }
2486
2487         if (current_link_up != netif_carrier_ok(tp->dev)) {
2488                 if (current_link_up)
2489                         netif_carrier_on(tp->dev);
2490                 else
2491                         netif_carrier_off(tp->dev);
2492                 tg3_link_report(tp);
2493         } else {
2494                 u32 now_pause_cfg =
2495                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2496                                          TG3_FLAG_TX_PAUSE);
2497                 if (orig_pause_cfg != now_pause_cfg ||
2498                     orig_active_speed != tp->link_config.active_speed ||
2499                     orig_active_duplex != tp->link_config.active_duplex)
2500                         tg3_link_report(tp);
2501         }
2502
2503         return 0;
2504 }
2505
2506 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2507 {
2508         int err;
2509
2510         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2511                 err = tg3_setup_fiber_phy(tp, force_reset);
2512         } else {
2513                 err = tg3_setup_copper_phy(tp, force_reset);
2514         }
2515
2516         if (tp->link_config.active_speed == SPEED_1000 &&
2517             tp->link_config.active_duplex == DUPLEX_HALF)
2518                 tw32(MAC_TX_LENGTHS,
2519                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2520                       (6 << TX_LENGTHS_IPG_SHIFT) |
2521                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2522         else
2523                 tw32(MAC_TX_LENGTHS,
2524                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2525                       (6 << TX_LENGTHS_IPG_SHIFT) |
2526                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2527
2528         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2529                 if (netif_carrier_ok(tp->dev)) {
2530                         tw32(HOSTCC_STAT_COAL_TICKS,
2531                              tp->coal.stats_block_coalesce_usecs);
2532                 } else {
2533                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2534                 }
2535         }
2536
2537         return err;
2538 }
2539
2540 /* Tigon3 never reports partial packet sends.  So we do not
2541  * need special logic to handle SKBs that have not had all
2542  * of their frags sent yet, like SunGEM does.
2543  */
2544 static void tg3_tx(struct tg3 *tp)
2545 {
2546         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2547         u32 sw_idx = tp->tx_cons;
2548
2549         while (sw_idx != hw_idx) {
2550                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2551                 struct sk_buff *skb = ri->skb;
2552                 int i;
2553
2554                 if (unlikely(skb == NULL))
2555                         BUG();
2556
2557                 pci_unmap_single(tp->pdev,
2558                                  pci_unmap_addr(ri, mapping),
2559                                  skb_headlen(skb),
2560                                  PCI_DMA_TODEVICE);
2561
2562                 ri->skb = NULL;
2563
2564                 sw_idx = NEXT_TX(sw_idx);
2565
2566                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2567                         if (unlikely(sw_idx == hw_idx))
2568                                 BUG();
2569
2570                         ri = &tp->tx_buffers[sw_idx];
2571                         if (unlikely(ri->skb != NULL))
2572                                 BUG();
2573
2574                         pci_unmap_page(tp->pdev,
2575                                        pci_unmap_addr(ri, mapping),
2576                                        skb_shinfo(skb)->frags[i].size,
2577                                        PCI_DMA_TODEVICE);
2578
2579                         sw_idx = NEXT_TX(sw_idx);
2580                 }
2581
2582                 dev_kfree_skb_irq(skb);
2583         }
2584
2585         tp->tx_cons = sw_idx;
2586
2587         if (netif_queue_stopped(tp->dev) &&
2588             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2589                 netif_wake_queue(tp->dev);
2590 }
2591
2592 /* Returns size of skb allocated or < 0 on error.
2593  *
2594  * We only need to fill in the address because the other members
2595  * of the RX descriptor are invariant, see tg3_init_rings.
2596  *
2597  * Note the purposeful assymetry of cpu vs. chip accesses.  For
2598  * posting buffers we only dirty the first cache line of the RX
2599  * descriptor (containing the address).  Whereas for the RX status
2600  * buffers the cpu only reads the last cacheline of the RX descriptor
2601  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2602  */
2603 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2604                             int src_idx, u32 dest_idx_unmasked)
2605 {
2606         struct tg3_rx_buffer_desc *desc;
2607         struct ring_info *map, *src_map;
2608         struct sk_buff *skb;
2609         dma_addr_t mapping;
2610         int skb_size, dest_idx;
2611
2612         src_map = NULL;
2613         switch (opaque_key) {
2614         case RXD_OPAQUE_RING_STD:
2615                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2616                 desc = &tp->rx_std[dest_idx];
2617                 map = &tp->rx_std_buffers[dest_idx];
2618                 if (src_idx >= 0)
2619                         src_map = &tp->rx_std_buffers[src_idx];
2620                 skb_size = RX_PKT_BUF_SZ;
2621                 break;
2622
2623         case RXD_OPAQUE_RING_JUMBO:
2624                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2625                 desc = &tp->rx_jumbo[dest_idx];
2626                 map = &tp->rx_jumbo_buffers[dest_idx];
2627                 if (src_idx >= 0)
2628                         src_map = &tp->rx_jumbo_buffers[src_idx];
2629                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2630                 break;
2631
2632         default:
2633                 return -EINVAL;
2634         };
2635
2636         /* Do not overwrite any of the map or rp information
2637          * until we are sure we can commit to a new buffer.
2638          *
2639          * Callers depend upon this behavior and assume that
2640          * we leave everything unchanged if we fail.
2641          */
2642         skb = dev_alloc_skb(skb_size);
2643         if (skb == NULL)
2644                 return -ENOMEM;
2645
2646         skb->dev = tp->dev;
2647         skb_reserve(skb, tp->rx_offset);
2648
2649         mapping = pci_map_single(tp->pdev, skb->data,
2650                                  skb_size - tp->rx_offset,
2651                                  PCI_DMA_FROMDEVICE);
2652
2653         map->skb = skb;
2654         pci_unmap_addr_set(map, mapping, mapping);
2655
2656         if (src_map != NULL)
2657                 src_map->skb = NULL;
2658
2659         desc->addr_hi = ((u64)mapping >> 32);
2660         desc->addr_lo = ((u64)mapping & 0xffffffff);
2661
2662         return skb_size;
2663 }
2664
2665 /* We only need to move over in the address because the other
2666  * members of the RX descriptor are invariant.  See notes above
2667  * tg3_alloc_rx_skb for full details.
2668  */
2669 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2670                            int src_idx, u32 dest_idx_unmasked)
2671 {
2672         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2673         struct ring_info *src_map, *dest_map;
2674         int dest_idx;
2675
2676         switch (opaque_key) {
2677         case RXD_OPAQUE_RING_STD:
2678                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2679                 dest_desc = &tp->rx_std[dest_idx];
2680                 dest_map = &tp->rx_std_buffers[dest_idx];
2681                 src_desc = &tp->rx_std[src_idx];
2682                 src_map = &tp->rx_std_buffers[src_idx];
2683                 break;
2684
2685         case RXD_OPAQUE_RING_JUMBO:
2686                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2687                 dest_desc = &tp->rx_jumbo[dest_idx];
2688                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2689                 src_desc = &tp->rx_jumbo[src_idx];
2690                 src_map = &tp->rx_jumbo_buffers[src_idx];
2691                 break;
2692
2693         default:
2694                 return;
2695         };
2696
2697         dest_map->skb = src_map->skb;
2698         pci_unmap_addr_set(dest_map, mapping,
2699                            pci_unmap_addr(src_map, mapping));
2700         dest_desc->addr_hi = src_desc->addr_hi;
2701         dest_desc->addr_lo = src_desc->addr_lo;
2702
2703         src_map->skb = NULL;
2704 }
2705
2706 #if TG3_VLAN_TAG_USED
2707 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2708 {
2709         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2710 }
2711 #endif
2712
2713 /* The RX ring scheme is composed of multiple rings which post fresh
2714  * buffers to the chip, and one special ring the chip uses to report
2715  * status back to the host.
2716  *
2717  * The special ring reports the status of received packets to the
2718  * host.  The chip does not write into the original descriptor the
2719  * RX buffer was obtained from.  The chip simply takes the original
2720  * descriptor as provided by the host, updates the status and length
2721  * field, then writes this into the next status ring entry.
2722  *
2723  * Each ring the host uses to post buffers to the chip is described
2724  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
2725  * it is first placed into the on-chip ram.  When the packet's length
2726  * is known, it walks down the TG3_BDINFO entries to select the ring.
2727  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2728  * which is within the range of the new packet's length is chosen.
2729  *
2730  * The "separate ring for rx status" scheme may sound queer, but it makes
2731  * sense from a cache coherency perspective.  If only the host writes
2732  * to the buffer post rings, and only the chip writes to the rx status
2733  * rings, then cache lines never move beyond shared-modified state.
2734  * If both the host and chip were to write into the same ring, cache line
2735  * eviction could occur since both entities want it in an exclusive state.
2736  */
2737 static int tg3_rx(struct tg3 *tp, int budget)
2738 {
2739         u32 work_mask;
2740         u32 sw_idx = tp->rx_rcb_ptr;
2741         u16 hw_idx;
2742         int received;
2743
2744         hw_idx = tp->hw_status->idx[0].rx_producer;
2745         /*
2746          * We need to order the read of hw_idx and the read of
2747          * the opaque cookie.
2748          */
2749         rmb();
2750         work_mask = 0;
2751         received = 0;
2752         while (sw_idx != hw_idx && budget > 0) {
2753                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2754                 unsigned int len;
2755                 struct sk_buff *skb;
2756                 dma_addr_t dma_addr;
2757                 u32 opaque_key, desc_idx, *post_ptr;
2758
2759                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2760                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2761                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2762                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2763                                                   mapping);
2764                         skb = tp->rx_std_buffers[desc_idx].skb;
2765                         post_ptr = &tp->rx_std_ptr;
2766                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2767                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2768                                                   mapping);
2769                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2770                         post_ptr = &tp->rx_jumbo_ptr;
2771                 }
2772                 else {
2773                         goto next_pkt_nopost;
2774                 }
2775
2776                 work_mask |= opaque_key;
2777
2778                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2779                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2780                 drop_it:
2781                         tg3_recycle_rx(tp, opaque_key,
2782                                        desc_idx, *post_ptr);
2783                 drop_it_no_recycle:
2784                         /* Other statistics kept track of by card. */
2785                         tp->net_stats.rx_dropped++;
2786                         goto next_pkt;
2787                 }
2788
2789                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2790
2791                 if (len > RX_COPY_THRESHOLD 
2792                         && tp->rx_offset == 2
2793                         /* rx_offset != 2 iff this is a 5701 card running
2794                          * in PCI-X mode [see tg3_get_invariants()] */
2795                 ) {
2796                         int skb_size;
2797
2798                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2799                                                     desc_idx, *post_ptr);
2800                         if (skb_size < 0)
2801                                 goto drop_it;
2802
2803                         pci_unmap_single(tp->pdev, dma_addr,
2804                                          skb_size - tp->rx_offset,
2805                                          PCI_DMA_FROMDEVICE);
2806
2807                         skb_put(skb, len);
2808                 } else {
2809                         struct sk_buff *copy_skb;
2810
2811                         tg3_recycle_rx(tp, opaque_key,
2812                                        desc_idx, *post_ptr);
2813
2814                         copy_skb = dev_alloc_skb(len + 2);
2815                         if (copy_skb == NULL)
2816                                 goto drop_it_no_recycle;
2817
2818                         copy_skb->dev = tp->dev;
2819                         skb_reserve(copy_skb, 2);
2820                         skb_put(copy_skb, len);
2821                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2822                         memcpy(copy_skb->data, skb->data, len);
2823                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2824
2825                         /* We'll reuse the original ring buffer. */
2826                         skb = copy_skb;
2827                 }
2828
2829                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2830                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2831                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2832                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2833                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2834                 else
2835                         skb->ip_summed = CHECKSUM_NONE;
2836
2837                 skb->protocol = eth_type_trans(skb, tp->dev);
2838 #if TG3_VLAN_TAG_USED
2839                 if (tp->vlgrp != NULL &&
2840                     desc->type_flags & RXD_FLAG_VLAN) {
2841                         tg3_vlan_rx(tp, skb,
2842                                     desc->err_vlan & RXD_VLAN_MASK);
2843                 } else
2844 #endif
2845                         netif_receive_skb(skb);
2846
2847                 tp->dev->last_rx = jiffies;
2848                 received++;
2849                 budget--;
2850
2851 next_pkt:
2852                 (*post_ptr)++;
2853 next_pkt_nopost:
2854                 sw_idx++;
2855                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2856
2857                 /* Refresh hw_idx to see if there is new work */
2858                 if (sw_idx == hw_idx) {
2859                         hw_idx = tp->hw_status->idx[0].rx_producer;
2860                         rmb();
2861                 }
2862         }
2863
2864         /* ACK the status ring. */
2865         tp->rx_rcb_ptr = sw_idx;
2866         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2867
2868         /* Refill RX ring(s). */
2869         if (work_mask & RXD_OPAQUE_RING_STD) {
2870                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2871                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2872                              sw_idx);
2873         }
2874         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2875                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2876                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2877                              sw_idx);
2878         }
2879         mmiowb();
2880
2881         return received;
2882 }
2883
2884 static int tg3_poll(struct net_device *netdev, int *budget)
2885 {
2886         struct tg3 *tp = netdev_priv(netdev);
2887         struct tg3_hw_status *sblk = tp->hw_status;
2888         unsigned long flags;
2889         int done;
2890
2891         spin_lock_irqsave(&tp->lock, flags);
2892
2893         /* handle link change and other phy events */
2894         if (!(tp->tg3_flags &
2895               (TG3_FLAG_USE_LINKCHG_REG |
2896                TG3_FLAG_POLL_SERDES))) {
2897                 if (sblk->status & SD_STATUS_LINK_CHG) {
2898                         sblk->status = SD_STATUS_UPDATED |
2899                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2900                         tg3_setup_phy(tp, 0);
2901                 }
2902         }
2903
2904         /* run TX completion thread */
2905         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2906                 spin_lock(&tp->tx_lock);
2907                 tg3_tx(tp);
2908                 spin_unlock(&tp->tx_lock);
2909         }
2910
2911         spin_unlock_irqrestore(&tp->lock, flags);
2912
2913         /* run RX thread, within the bounds set by NAPI.
2914          * All RX "locking" is done by ensuring outside
2915          * code synchronizes with dev->poll()
2916          */
2917         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2918                 int orig_budget = *budget;
2919                 int work_done;
2920
2921                 if (orig_budget > netdev->quota)
2922                         orig_budget = netdev->quota;
2923
2924                 work_done = tg3_rx(tp, orig_budget);
2925
2926                 *budget -= work_done;
2927                 netdev->quota -= work_done;
2928         }
2929
2930         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
2931                 tp->last_tag = sblk->status_tag;
2932         rmb();
2933
2934         /* if no more work, tell net stack and NIC we're done */
2935         done = !tg3_has_work(tp);
2936         if (done) {
2937                 spin_lock_irqsave(&tp->lock, flags);
2938                 __netif_rx_complete(netdev);
2939                 tg3_restart_ints(tp);
2940                 spin_unlock_irqrestore(&tp->lock, flags);
2941         }
2942
2943         return (done ? 0 : 1);
2944 }
2945
2946 /* MSI ISR - No need to check for interrupt sharing and no need to
2947  * flush status block and interrupt mailbox. PCI ordering rules
2948  * guarantee that MSI will arrive after the status block.
2949  */
2950 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2951 {
2952         struct net_device *dev = dev_id;
2953         struct tg3 *tp = netdev_priv(dev);
2954         struct tg3_hw_status *sblk = tp->hw_status;
2955         unsigned long flags;
2956
2957         spin_lock_irqsave(&tp->lock, flags);
2958
2959         /*
2960          * Writing any value to intr-mbox-0 clears PCI INTA# and
2961          * chip-internal interrupt pending events.
2962          * Writing non-zero to intr-mbox-0 additional tells the
2963          * NIC to stop sending us irqs, engaging "in-intr-handler"
2964          * event coalescing.
2965          */
2966         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2967         tp->last_tag = sblk->status_tag;
2968         sblk->status &= ~SD_STATUS_UPDATED;
2969         if (likely(tg3_has_work(tp)))
2970                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2971         else {
2972                 /* No work, re-enable interrupts.  */
2973                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2974                              tp->last_tag << 24);
2975         }
2976
2977         spin_unlock_irqrestore(&tp->lock, flags);
2978
2979         return IRQ_RETVAL(1);
2980 }
2981
2982 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2983 {
2984         struct net_device *dev = dev_id;
2985         struct tg3 *tp = netdev_priv(dev);
2986         struct tg3_hw_status *sblk = tp->hw_status;
2987         unsigned long flags;
2988         unsigned int handled = 1;
2989
2990         spin_lock_irqsave(&tp->lock, flags);
2991
2992         /* In INTx mode, it is possible for the interrupt to arrive at
2993          * the CPU before the status block posted prior to the interrupt.
2994          * Reading the PCI State register will confirm whether the
2995          * interrupt is ours and will flush the status block.
2996          */
2997         if ((sblk->status & SD_STATUS_UPDATED) ||
2998             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2999                 /*
3000                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3001                  * chip-internal interrupt pending events.
3002                  * Writing non-zero to intr-mbox-0 additional tells the
3003                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3004                  * event coalescing.
3005                  */
3006                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3007                              0x00000001);
3008                 sblk->status &= ~SD_STATUS_UPDATED;
3009                 if (likely(tg3_has_work(tp)))
3010                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3011                 else {
3012                         /* No work, shared interrupt perhaps?  re-enable
3013                          * interrupts, and flush that PCI write
3014                          */
3015                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3016                                 0x00000000);
3017                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3018                 }
3019         } else {        /* shared interrupt */
3020                 handled = 0;
3021         }
3022
3023         spin_unlock_irqrestore(&tp->lock, flags);
3024
3025         return IRQ_RETVAL(handled);
3026 }
3027
3028 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3029 {
3030         struct net_device *dev = dev_id;
3031         struct tg3 *tp = netdev_priv(dev);
3032         struct tg3_hw_status *sblk = tp->hw_status;
3033         unsigned long flags;
3034         unsigned int handled = 1;
3035
3036         spin_lock_irqsave(&tp->lock, flags);
3037
3038         /* In INTx mode, it is possible for the interrupt to arrive at
3039          * the CPU before the status block posted prior to the interrupt.
3040          * Reading the PCI State register will confirm whether the
3041          * interrupt is ours and will flush the status block.
3042          */
3043         if ((sblk->status & SD_STATUS_UPDATED) ||
3044             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3045                 /*
3046                  * writing any value to intr-mbox-0 clears PCI INTA# and
3047                  * chip-internal interrupt pending events.
3048                  * writing non-zero to intr-mbox-0 additional tells the
3049                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3050                  * event coalescing.
3051                  */
3052                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3053                              0x00000001);
3054                 tp->last_tag = sblk->status_tag;
3055                 sblk->status &= ~SD_STATUS_UPDATED;
3056                 if (likely(tg3_has_work(tp)))
3057                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3058                 else {
3059                         /* no work, shared interrupt perhaps?  re-enable
3060                          * interrupts, and flush that PCI write
3061                          */
3062                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3063                                      tp->last_tag << 24);
3064                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3065                 }
3066         } else {        /* shared interrupt */
3067                 handled = 0;
3068         }
3069
3070         spin_unlock_irqrestore(&tp->lock, flags);
3071
3072         return IRQ_RETVAL(handled);
3073 }
3074
3075 /* ISR for interrupt test */
3076 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3077                 struct pt_regs *regs)
3078 {
3079         struct net_device *dev = dev_id;
3080         struct tg3 *tp = netdev_priv(dev);
3081         struct tg3_hw_status *sblk = tp->hw_status;
3082
3083         if (sblk->status & SD_STATUS_UPDATED) {
3084                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3085                              0x00000001);
3086                 return IRQ_RETVAL(1);
3087         }
3088         return IRQ_RETVAL(0);
3089 }
3090
3091 static int tg3_init_hw(struct tg3 *);
3092 static int tg3_halt(struct tg3 *, int, int);
3093
3094 #ifdef CONFIG_NET_POLL_CONTROLLER
3095 static void tg3_poll_controller(struct net_device *dev)
3096 {
3097         struct tg3 *tp = netdev_priv(dev);
3098
3099         tg3_interrupt(tp->pdev->irq, dev, NULL);
3100 }
3101 #endif
3102
3103 static void tg3_reset_task(void *_data)
3104 {
3105         struct tg3 *tp = _data;
3106         unsigned int restart_timer;
3107
3108         tg3_netif_stop(tp);
3109
3110         spin_lock_irq(&tp->lock);
3111         spin_lock(&tp->tx_lock);
3112
3113         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3114         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3115
3116         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3117         tg3_init_hw(tp);
3118
3119         tg3_netif_start(tp);
3120
3121         spin_unlock(&tp->tx_lock);
3122         spin_unlock_irq(&tp->lock);
3123
3124         if (restart_timer)
3125                 mod_timer(&tp->timer, jiffies + 1);
3126 }
3127
3128 static void tg3_tx_timeout(struct net_device *dev)
3129 {
3130         struct tg3 *tp = netdev_priv(dev);
3131
3132         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3133                dev->name);
3134
3135         schedule_work(&tp->reset_task);
3136 }
3137
3138 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3139
3140 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3141                                        u32 guilty_entry, int guilty_len,
3142                                        u32 last_plus_one, u32 *start, u32 mss)
3143 {
3144         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3145         dma_addr_t new_addr;
3146         u32 entry = *start;
3147         int i;
3148
3149         if (!new_skb) {
3150                 dev_kfree_skb(skb);
3151                 return -1;
3152         }
3153
3154         /* New SKB is guaranteed to be linear. */
3155         entry = *start;
3156         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3157                                   PCI_DMA_TODEVICE);
3158         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3159                     (skb->ip_summed == CHECKSUM_HW) ?
3160                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3161         *start = NEXT_TX(entry);
3162
3163         /* Now clean up the sw ring entries. */
3164         i = 0;
3165         while (entry != last_plus_one) {
3166                 int len;
3167
3168                 if (i == 0)
3169                         len = skb_headlen(skb);
3170                 else
3171                         len = skb_shinfo(skb)->frags[i-1].size;
3172                 pci_unmap_single(tp->pdev,
3173                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3174                                  len, PCI_DMA_TODEVICE);
3175                 if (i == 0) {
3176                         tp->tx_buffers[entry].skb = new_skb;
3177                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3178                 } else {
3179                         tp->tx_buffers[entry].skb = NULL;
3180                 }
3181                 entry = NEXT_TX(entry);
3182                 i++;
3183         }
3184
3185         dev_kfree_skb(skb);
3186
3187         return 0;
3188 }
3189
3190 static void tg3_set_txd(struct tg3 *tp, int entry,
3191                         dma_addr_t mapping, int len, u32 flags,
3192                         u32 mss_and_is_end)
3193 {
3194         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3195         int is_end = (mss_and_is_end & 0x1);
3196         u32 mss = (mss_and_is_end >> 1);
3197         u32 vlan_tag = 0;
3198
3199         if (is_end)
3200                 flags |= TXD_FLAG_END;
3201         if (flags & TXD_FLAG_VLAN) {
3202                 vlan_tag = flags >> 16;
3203                 flags &= 0xffff;
3204         }
3205         vlan_tag |= (mss << TXD_MSS_SHIFT);
3206
3207         txd->addr_hi = ((u64) mapping >> 32);
3208         txd->addr_lo = ((u64) mapping & 0xffffffff);
3209         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3210         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3211 }
3212
3213 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3214 {
3215         u32 base = (u32) mapping & 0xffffffff;
3216
3217         return ((base > 0xffffdcc0) &&
3218                 (base + len + 8 < base));
3219 }
3220
3221 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3222 {
3223         struct tg3 *tp = netdev_priv(dev);
3224         dma_addr_t mapping;
3225         unsigned int i;
3226         u32 len, entry, base_flags, mss;
3227         int would_hit_hwbug;
3228         unsigned long flags;
3229
3230         len = skb_headlen(skb);
3231
3232         /* No BH disabling for tx_lock here.  We are running in BH disabled
3233          * context and TX reclaim runs via tp->poll inside of a software
3234          * interrupt.  Rejoice!
3235          *
3236          * Actually, things are not so simple.  If we are to take a hw
3237          * IRQ here, we can deadlock, consider:
3238          *
3239          *       CPU1           CPU2
3240          *   tg3_start_xmit
3241          *   take tp->tx_lock
3242          *                      tg3_timer
3243          *                      take tp->lock
3244          *   tg3_interrupt
3245          *   spin on tp->lock
3246          *                      spin on tp->tx_lock
3247          *
3248          * So we really do need to disable interrupts when taking
3249          * tx_lock here.
3250          */
3251         local_irq_save(flags);
3252         if (!spin_trylock(&tp->tx_lock)) { 
3253                 local_irq_restore(flags);
3254                 return NETDEV_TX_LOCKED; 
3255         } 
3256
3257         /* This is a hard error, log it. */
3258         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3259                 netif_stop_queue(dev);
3260                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3261                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3262                        dev->name);
3263                 return NETDEV_TX_BUSY;
3264         }
3265
3266         entry = tp->tx_prod;
3267         base_flags = 0;
3268         if (skb->ip_summed == CHECKSUM_HW)
3269                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3270 #if TG3_TSO_SUPPORT != 0
3271         mss = 0;
3272         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3273             (mss = skb_shinfo(skb)->tso_size) != 0) {
3274                 int tcp_opt_len, ip_tcp_len;
3275
3276                 if (skb_header_cloned(skb) &&
3277                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3278                         dev_kfree_skb(skb);
3279                         goto out_unlock;
3280                 }
3281
3282                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3283                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3284
3285                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3286                                TXD_FLAG_CPU_POST_DMA);
3287
3288                 skb->nh.iph->check = 0;
3289                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3290                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3291                         skb->h.th->check = 0;
3292                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3293                 }
3294                 else {
3295                         skb->h.th->check =
3296                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3297                                                    skb->nh.iph->daddr,
3298                                                    0, IPPROTO_TCP, 0);
3299                 }
3300
3301                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3302                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3303                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3304                                 int tsflags;
3305
3306                                 tsflags = ((skb->nh.iph->ihl - 5) +
3307                                            (tcp_opt_len >> 2));
3308                                 mss |= (tsflags << 11);
3309                         }
3310                 } else {
3311                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3312                                 int tsflags;
3313
3314                                 tsflags = ((skb->nh.iph->ihl - 5) +
3315                                            (tcp_opt_len >> 2));
3316                                 base_flags |= tsflags << 12;
3317                         }
3318                 }
3319         }
3320 #else
3321         mss = 0;
3322 #endif
3323 #if TG3_VLAN_TAG_USED
3324         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3325                 base_flags |= (TXD_FLAG_VLAN |
3326                                (vlan_tx_tag_get(skb) << 16));
3327 #endif
3328
3329         /* Queue skb data, a.k.a. the main skb fragment. */
3330         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3331
3332         tp->tx_buffers[entry].skb = skb;
3333         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3334
3335         would_hit_hwbug = 0;
3336
3337         if (tg3_4g_overflow_test(mapping, len))
3338                 would_hit_hwbug = entry + 1;
3339
3340         tg3_set_txd(tp, entry, mapping, len, base_flags,
3341                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3342
3343         entry = NEXT_TX(entry);
3344
3345         /* Now loop through additional data fragments, and queue them. */
3346         if (skb_shinfo(skb)->nr_frags > 0) {
3347                 unsigned int i, last;
3348
3349                 last = skb_shinfo(skb)->nr_frags - 1;
3350                 for (i = 0; i <= last; i++) {
3351                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3352
3353                         len = frag->size;
3354                         mapping = pci_map_page(tp->pdev,
3355                                                frag->page,
3356                                                frag->page_offset,
3357                                                len, PCI_DMA_TODEVICE);
3358
3359                         tp->tx_buffers[entry].skb = NULL;
3360                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3361
3362                         if (tg3_4g_overflow_test(mapping, len)) {
3363                                 /* Only one should match. */
3364                                 if (would_hit_hwbug)
3365                                         BUG();
3366                                 would_hit_hwbug = entry + 1;
3367                         }
3368
3369                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3370                                 tg3_set_txd(tp, entry, mapping, len,
3371                                             base_flags, (i == last)|(mss << 1));
3372                         else
3373                                 tg3_set_txd(tp, entry, mapping, len,
3374                                             base_flags, (i == last));
3375
3376                         entry = NEXT_TX(entry);
3377                 }
3378         }
3379
3380         if (would_hit_hwbug) {
3381                 u32 last_plus_one = entry;
3382                 u32 start;
3383                 unsigned int len = 0;
3384
3385                 would_hit_hwbug -= 1;
3386                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3387                 entry &= (TG3_TX_RING_SIZE - 1);
3388                 start = entry;
3389                 i = 0;
3390                 while (entry != last_plus_one) {
3391                         if (i == 0)
3392                                 len = skb_headlen(skb);
3393                         else
3394                                 len = skb_shinfo(skb)->frags[i-1].size;
3395
3396                         if (entry == would_hit_hwbug)
3397                                 break;
3398
3399                         i++;
3400                         entry = NEXT_TX(entry);
3401
3402                 }
3403
3404                 /* If the workaround fails due to memory/mapping
3405                  * failure, silently drop this packet.
3406                  */
3407                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3408                                                 entry, len,
3409                                                 last_plus_one,
3410                                                 &start, mss))
3411                         goto out_unlock;
3412
3413                 entry = start;
3414         }
3415
3416         /* Packets are ready, update Tx producer idx local and on card. */
3417         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3418
3419         tp->tx_prod = entry;
3420         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3421                 netif_stop_queue(dev);
3422
3423 out_unlock:
3424         mmiowb();
3425         spin_unlock_irqrestore(&tp->tx_lock, flags);
3426
3427         dev->trans_start = jiffies;
3428
3429         return NETDEV_TX_OK;
3430 }
3431
3432 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3433                                int new_mtu)
3434 {
3435         dev->mtu = new_mtu;
3436
3437         if (new_mtu > ETH_DATA_LEN)
3438                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3439         else
3440                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3441 }
3442
3443 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3444 {
3445         struct tg3 *tp = netdev_priv(dev);
3446
3447         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3448                 return -EINVAL;
3449
3450         if (!netif_running(dev)) {
3451                 /* We'll just catch it later when the
3452                  * device is up'd.
3453                  */
3454                 tg3_set_mtu(dev, tp, new_mtu);
3455                 return 0;
3456         }
3457
3458         tg3_netif_stop(tp);
3459         spin_lock_irq(&tp->lock);
3460         spin_lock(&tp->tx_lock);
3461
3462         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3463
3464         tg3_set_mtu(dev, tp, new_mtu);
3465
3466         tg3_init_hw(tp);
3467
3468         tg3_netif_start(tp);
3469
3470         spin_unlock(&tp->tx_lock);
3471         spin_unlock_irq(&tp->lock);
3472
3473         return 0;
3474 }
3475
3476 /* Free up pending packets in all rx/tx rings.
3477  *
3478  * The chip has been shut down and the driver detached from
3479  * the networking, so no interrupts or new tx packets will
3480  * end up in the driver.  tp->{tx,}lock is not held and we are not
3481  * in an interrupt context and thus may sleep.
3482  */
3483 static void tg3_free_rings(struct tg3 *tp)
3484 {
3485         struct ring_info *rxp;
3486         int i;
3487
3488         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3489                 rxp = &tp->rx_std_buffers[i];
3490
3491                 if (rxp->skb == NULL)
3492                         continue;
3493                 pci_unmap_single(tp->pdev,
3494                                  pci_unmap_addr(rxp, mapping),
3495                                  RX_PKT_BUF_SZ - tp->rx_offset,
3496                                  PCI_DMA_FROMDEVICE);
3497                 dev_kfree_skb_any(rxp->skb);
3498                 rxp->skb = NULL;
3499         }
3500
3501         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3502                 rxp = &tp->rx_jumbo_buffers[i];
3503
3504                 if (rxp->skb == NULL)
3505                         continue;
3506                 pci_unmap_single(tp->pdev,
3507                                  pci_unmap_addr(rxp, mapping),
3508                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3509                                  PCI_DMA_FROMDEVICE);
3510                 dev_kfree_skb_any(rxp->skb);
3511                 rxp->skb = NULL;
3512         }
3513
3514         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3515                 struct tx_ring_info *txp;
3516                 struct sk_buff *skb;
3517                 int j;
3518
3519                 txp = &tp->tx_buffers[i];
3520                 skb = txp->skb;
3521
3522                 if (skb == NULL) {
3523                         i++;
3524                         continue;
3525                 }
3526
3527                 pci_unmap_single(tp->pdev,
3528                                  pci_unmap_addr(txp, mapping),
3529                                  skb_headlen(skb),
3530                                  PCI_DMA_TODEVICE);
3531                 txp->skb = NULL;
3532
3533                 i++;
3534
3535                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3536                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3537                         pci_unmap_page(tp->pdev,
3538                                        pci_unmap_addr(txp, mapping),
3539                                        skb_shinfo(skb)->frags[j].size,
3540                                        PCI_DMA_TODEVICE);
3541                         i++;
3542                 }
3543
3544                 dev_kfree_skb_any(skb);
3545         }
3546 }
3547
3548 /* Initialize tx/rx rings for packet processing.
3549  *
3550  * The chip has been shut down and the driver detached from
3551  * the networking, so no interrupts or new tx packets will
3552  * end up in the driver.  tp->{tx,}lock are held and thus
3553  * we may not sleep.
3554  */
3555 static void tg3_init_rings(struct tg3 *tp)
3556 {
3557         u32 i;
3558
3559         /* Free up all the SKBs. */
3560         tg3_free_rings(tp);
3561
3562         /* Zero out all descriptors. */
3563         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3564         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3565         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3566         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3567
3568         /* Initialize invariants of the rings, we only set this
3569          * stuff once.  This works because the card does not
3570          * write into the rx buffer posting rings.
3571          */
3572         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3573                 struct tg3_rx_buffer_desc *rxd;
3574
3575                 rxd = &tp->rx_std[i];
3576                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3577                         << RXD_LEN_SHIFT;
3578                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3579                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3580                                (i << RXD_OPAQUE_INDEX_SHIFT));
3581         }
3582
3583         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3584                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3585                         struct tg3_rx_buffer_desc *rxd;
3586
3587                         rxd = &tp->rx_jumbo[i];
3588                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3589                                 << RXD_LEN_SHIFT;
3590                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3591                                 RXD_FLAG_JUMBO;
3592                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3593                                (i << RXD_OPAQUE_INDEX_SHIFT));
3594                 }
3595         }
3596
3597         /* Now allocate fresh SKBs for each rx ring. */
3598         for (i = 0; i < tp->rx_pending; i++) {
3599                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3600                                      -1, i) < 0)
3601                         break;
3602         }
3603
3604         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3605                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3606                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3607                                              -1, i) < 0)
3608                                 break;
3609                 }
3610         }
3611 }
3612
3613 /*
3614  * Must not be invoked with interrupt sources disabled and
3615  * the hardware shutdown down.
3616  */
3617 static void tg3_free_consistent(struct tg3 *tp)
3618 {
3619         if (tp->rx_std_buffers) {
3620                 kfree(tp->rx_std_buffers);
3621                 tp->rx_std_buffers = NULL;
3622         }
3623         if (tp->rx_std) {
3624                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3625                                     tp->rx_std, tp->rx_std_mapping);
3626                 tp->rx_std = NULL;
3627         }
3628         if (tp->rx_jumbo) {
3629                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3630                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3631                 tp->rx_jumbo = NULL;
3632         }
3633         if (tp->rx_rcb) {
3634                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3635                                     tp->rx_rcb, tp->rx_rcb_mapping);
3636                 tp->rx_rcb = NULL;
3637         }
3638         if (tp->tx_ring) {
3639                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3640                         tp->tx_ring, tp->tx_desc_mapping);
3641                 tp->tx_ring = NULL;
3642         }
3643         if (tp->hw_status) {
3644                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3645                                     tp->hw_status, tp->status_mapping);
3646                 tp->hw_status = NULL;
3647         }
3648         if (tp->hw_stats) {
3649                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3650                                     tp->hw_stats, tp->stats_mapping);
3651                 tp->hw_stats = NULL;
3652         }
3653 }
3654
3655 /*
3656  * Must not be invoked with interrupt sources disabled and
3657  * the hardware shutdown down.  Can sleep.
3658  */
3659 static int tg3_alloc_consistent(struct tg3 *tp)
3660 {
3661         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3662                                       (TG3_RX_RING_SIZE +
3663                                        TG3_RX_JUMBO_RING_SIZE)) +
3664                                      (sizeof(struct tx_ring_info) *
3665                                       TG3_TX_RING_SIZE),
3666                                      GFP_KERNEL);
3667         if (!tp->rx_std_buffers)
3668                 return -ENOMEM;
3669
3670         memset(tp->rx_std_buffers, 0,
3671                (sizeof(struct ring_info) *
3672                 (TG3_RX_RING_SIZE +
3673                  TG3_RX_JUMBO_RING_SIZE)) +
3674                (sizeof(struct tx_ring_info) *
3675                 TG3_TX_RING_SIZE));
3676
3677         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3678         tp->tx_buffers = (struct tx_ring_info *)
3679                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3680
3681         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3682                                           &tp->rx_std_mapping);
3683         if (!tp->rx_std)
3684                 goto err_out;
3685
3686         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3687                                             &tp->rx_jumbo_mapping);
3688
3689         if (!tp->rx_jumbo)
3690                 goto err_out;
3691
3692         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3693                                           &tp->rx_rcb_mapping);
3694         if (!tp->rx_rcb)
3695                 goto err_out;
3696
3697         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3698                                            &tp->tx_desc_mapping);
3699         if (!tp->tx_ring)
3700                 goto err_out;
3701
3702         tp->hw_status = pci_alloc_consistent(tp->pdev,
3703                                              TG3_HW_STATUS_SIZE,
3704                                              &tp->status_mapping);
3705         if (!tp->hw_status)
3706                 goto err_out;
3707
3708         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3709                                             sizeof(struct tg3_hw_stats),
3710                                             &tp->stats_mapping);
3711         if (!tp->hw_stats)
3712                 goto err_out;
3713
3714         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3715         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3716
3717         return 0;
3718
3719 err_out:
3720         tg3_free_consistent(tp);
3721         return -ENOMEM;
3722 }
3723
3724 #define MAX_WAIT_CNT 1000
3725
3726 /* To stop a block, clear the enable bit and poll till it
3727  * clears.  tp->lock is held.
3728  */
3729 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3730 {
3731         unsigned int i;
3732         u32 val;
3733
3734         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3735                 switch (ofs) {
3736                 case RCVLSC_MODE:
3737                 case DMAC_MODE:
3738                 case MBFREE_MODE:
3739                 case BUFMGR_MODE:
3740                 case MEMARB_MODE:
3741                         /* We can't enable/disable these bits of the
3742                          * 5705/5750, just say success.
3743                          */
3744                         return 0;
3745
3746                 default:
3747                         break;
3748                 };
3749         }
3750
3751         val = tr32(ofs);
3752         val &= ~enable_bit;
3753         tw32_f(ofs, val);
3754
3755         for (i = 0; i < MAX_WAIT_CNT; i++) {
3756                 udelay(100);
3757                 val = tr32(ofs);
3758                 if ((val & enable_bit) == 0)
3759                         break;
3760         }
3761
3762         if (i == MAX_WAIT_CNT && !silent) {
3763                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3764                        "ofs=%lx enable_bit=%x\n",
3765                        ofs, enable_bit);
3766                 return -ENODEV;
3767         }
3768
3769         return 0;
3770 }
3771
3772 /* tp->lock is held. */
3773 static int tg3_abort_hw(struct tg3 *tp, int silent)
3774 {
3775         int i, err;
3776
3777         tg3_disable_ints(tp);
3778
3779         tp->rx_mode &= ~RX_MODE_ENABLE;
3780         tw32_f(MAC_RX_MODE, tp->rx_mode);
3781         udelay(10);
3782
3783         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
3784         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
3785         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
3786         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
3787         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
3788         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
3789
3790         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
3791         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
3792         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
3793         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
3794         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
3795         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
3796         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
3797
3798         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3799         tw32_f(MAC_MODE, tp->mac_mode);
3800         udelay(40);
3801
3802         tp->tx_mode &= ~TX_MODE_ENABLE;
3803         tw32_f(MAC_TX_MODE, tp->tx_mode);
3804
3805         for (i = 0; i < MAX_WAIT_CNT; i++) {
3806                 udelay(100);
3807                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3808                         break;
3809         }
3810         if (i >= MAX_WAIT_CNT) {
3811                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3812                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3813                        tp->dev->name, tr32(MAC_TX_MODE));
3814                 err |= -ENODEV;
3815         }
3816
3817         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
3818         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
3819         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
3820
3821         tw32(FTQ_RESET, 0xffffffff);
3822         tw32(FTQ_RESET, 0x00000000);
3823
3824         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
3825         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
3826
3827         if (tp->hw_status)
3828                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3829         if (tp->hw_stats)
3830                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3831
3832         return err;
3833 }
3834
3835 /* tp->lock is held. */
3836 static int tg3_nvram_lock(struct tg3 *tp)
3837 {
3838         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3839                 int i;
3840
3841                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3842                 for (i = 0; i < 8000; i++) {
3843                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3844                                 break;
3845                         udelay(20);
3846                 }
3847                 if (i == 8000)
3848                         return -ENODEV;
3849         }
3850         return 0;
3851 }
3852
3853 /* tp->lock is held. */
3854 static void tg3_nvram_unlock(struct tg3 *tp)
3855 {
3856         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3857                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3858 }
3859
3860 /* tp->lock is held. */
3861 static void tg3_enable_nvram_access(struct tg3 *tp)
3862 {
3863         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3864             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3865                 u32 nvaccess = tr32(NVRAM_ACCESS);
3866
3867                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3868         }
3869 }
3870
3871 /* tp->lock is held. */
3872 static void tg3_disable_nvram_access(struct tg3 *tp)
3873 {
3874         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3875             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3876                 u32 nvaccess = tr32(NVRAM_ACCESS);
3877
3878                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3879         }
3880 }
3881
3882 /* tp->lock is held. */
3883 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3884 {
3885         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3886                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3887                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3888
3889         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3890                 switch (kind) {
3891                 case RESET_KIND_INIT:
3892                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3893                                       DRV_STATE_START);
3894                         break;
3895
3896                 case RESET_KIND_SHUTDOWN:
3897                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3898                                       DRV_STATE_UNLOAD);
3899                         break;
3900
3901                 case RESET_KIND_SUSPEND:
3902                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3903                                       DRV_STATE_SUSPEND);
3904                         break;
3905
3906                 default:
3907                         break;
3908                 };
3909         }
3910 }
3911
3912 /* tp->lock is held. */
3913 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3914 {
3915         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3916                 switch (kind) {
3917                 case RESET_KIND_INIT:
3918                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3919                                       DRV_STATE_START_DONE);
3920                         break;
3921
3922                 case RESET_KIND_SHUTDOWN:
3923                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3924                                       DRV_STATE_UNLOAD_DONE);
3925                         break;
3926
3927                 default:
3928                         break;
3929                 };
3930         }
3931 }
3932
3933 /* tp->lock is held. */
3934 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3935 {
3936         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3937                 switch (kind) {
3938                 case RESET_KIND_INIT:
3939                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3940                                       DRV_STATE_START);
3941                         break;
3942
3943                 case RESET_KIND_SHUTDOWN:
3944                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3945                                       DRV_STATE_UNLOAD);
3946                         break;
3947
3948                 case RESET_KIND_SUSPEND:
3949                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3950                                       DRV_STATE_SUSPEND);
3951                         break;
3952
3953                 default:
3954                         break;
3955                 };
3956         }
3957 }
3958
3959 static void tg3_stop_fw(struct tg3 *);
3960
3961 /* tp->lock is held. */
3962 static int tg3_chip_reset(struct tg3 *tp)
3963 {
3964         u32 val;
3965         u32 flags_save;
3966         int i;
3967
3968         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3969                 tg3_nvram_lock(tp);
3970
3971         /*
3972          * We must avoid the readl() that normally takes place.
3973          * It locks machines, causes machine checks, and other
3974          * fun things.  So, temporarily disable the 5701
3975          * hardware workaround, while we do the reset.
3976          */
3977         flags_save = tp->tg3_flags;
3978         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3979
3980         /* do the reset */
3981         val = GRC_MISC_CFG_CORECLK_RESET;
3982
3983         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3984                 if (tr32(0x7e2c) == 0x60) {
3985                         tw32(0x7e2c, 0x20);
3986                 }
3987                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3988                         tw32(GRC_MISC_CFG, (1 << 29));
3989                         val |= (1 << 29);
3990                 }
3991         }
3992
3993         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3994                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3995         tw32(GRC_MISC_CFG, val);
3996
3997         /* restore 5701 hardware bug workaround flag */
3998         tp->tg3_flags = flags_save;
3999
4000         /* Unfortunately, we have to delay before the PCI read back.
4001          * Some 575X chips even will not respond to a PCI cfg access
4002          * when the reset command is given to the chip.
4003          *
4004          * How do these hardware designers expect things to work
4005          * properly if the PCI write is posted for a long period
4006          * of time?  It is always necessary to have some method by
4007          * which a register read back can occur to push the write
4008          * out which does the reset.
4009          *
4010          * For most tg3 variants the trick below was working.
4011          * Ho hum...
4012          */
4013         udelay(120);
4014
4015         /* Flush PCI posted writes.  The normal MMIO registers
4016          * are inaccessible at this time so this is the only
4017          * way to make this reliably (actually, this is no longer
4018          * the case, see above).  I tried to use indirect
4019          * register read/write but this upset some 5701 variants.
4020          */
4021         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4022
4023         udelay(120);
4024
4025         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4026                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4027                         int i;
4028                         u32 cfg_val;
4029
4030                         /* Wait for link training to complete.  */
4031                         for (i = 0; i < 5000; i++)
4032                                 udelay(100);
4033
4034                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4035                         pci_write_config_dword(tp->pdev, 0xc4,
4036                                                cfg_val | (1 << 15));
4037                 }
4038                 /* Set PCIE max payload size and clear error status.  */
4039                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4040         }
4041
4042         /* Re-enable indirect register accesses. */
4043         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4044                                tp->misc_host_ctrl);
4045
4046         /* Set MAX PCI retry to zero. */
4047         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4048         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4049             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4050                 val |= PCISTATE_RETRY_SAME_DMA;
4051         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4052
4053         pci_restore_state(tp->pdev);
4054
4055         /* Make sure PCI-X relaxed ordering bit is clear. */
4056         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4057         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4058         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4059
4060         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4061
4062         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4063                 tg3_stop_fw(tp);
4064                 tw32(0x5000, 0x400);
4065         }
4066
4067         tw32(GRC_MODE, tp->grc_mode);
4068
4069         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4070                 u32 val = tr32(0xc4);
4071
4072                 tw32(0xc4, val | (1 << 15));
4073         }
4074
4075         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4076             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4077                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4078                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4079                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4080                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4081         }
4082
4083         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4084                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4085                 tw32_f(MAC_MODE, tp->mac_mode);
4086         } else
4087                 tw32_f(MAC_MODE, 0);
4088         udelay(40);
4089
4090         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4091                 /* Wait for firmware initialization to complete. */
4092                 for (i = 0; i < 100000; i++) {
4093                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4094                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4095                                 break;
4096                         udelay(10);
4097                 }
4098                 if (i >= 100000) {
4099                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4100                                "firmware will not restart magic=%08x\n",
4101                                tp->dev->name, val);
4102                         return -ENODEV;
4103                 }
4104         }
4105
4106         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4107             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4108                 u32 val = tr32(0x7c00);
4109
4110                 tw32(0x7c00, val | (1 << 25));
4111         }
4112
4113         /* Reprobe ASF enable state.  */
4114         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4115         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4116         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4117         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4118                 u32 nic_cfg;
4119
4120                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4121                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4122                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4123                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4124                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4125                 }
4126         }
4127
4128         return 0;
4129 }
4130
4131 /* tp->lock is held. */
4132 static void tg3_stop_fw(struct tg3 *tp)
4133 {
4134         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4135                 u32 val;
4136                 int i;
4137
4138                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4139                 val = tr32(GRC_RX_CPU_EVENT);
4140                 val |= (1 << 14);
4141                 tw32(GRC_RX_CPU_EVENT, val);
4142
4143                 /* Wait for RX cpu to ACK the event.  */
4144                 for (i = 0; i < 100; i++) {
4145                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4146                                 break;
4147                         udelay(1);
4148                 }
4149         }
4150 }
4151
4152 /* tp->lock is held. */
4153 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4154 {
4155         int err;
4156
4157         tg3_stop_fw(tp);
4158
4159         tg3_write_sig_pre_reset(tp, kind);
4160
4161         tg3_abort_hw(tp, silent);
4162         err = tg3_chip_reset(tp);
4163
4164         tg3_write_sig_legacy(tp, kind);
4165         tg3_write_sig_post_reset(tp, kind);
4166
4167         if (err)
4168                 return err;
4169
4170         return 0;
4171 }
4172
4173 #define TG3_FW_RELEASE_MAJOR    0x0
4174 #define TG3_FW_RELASE_MINOR     0x0
4175 #define TG3_FW_RELEASE_FIX      0x0
4176 #define TG3_FW_START_ADDR       0x08000000
4177 #define TG3_FW_TEXT_ADDR        0x08000000
4178 #define TG3_FW_TEXT_LEN         0x9c0
4179 #define TG3_FW_RODATA_ADDR      0x080009c0
4180 #define TG3_FW_RODATA_LEN       0x60
4181 #define TG3_FW_DATA_ADDR        0x08000a40
4182 #define TG3_FW_DATA_LEN         0x20
4183 #define TG3_FW_SBSS_ADDR        0x08000a60
4184 #define TG3_FW_SBSS_LEN         0xc
4185 #define TG3_FW_BSS_ADDR         0x08000a70
4186 #define TG3_FW_BSS_LEN          0x10
4187
4188 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4189         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4190         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4191         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4192         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4193         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4194         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4195         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4196         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4197         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4198         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4199         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4200         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4201         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4202         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4203         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4204         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4205         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4206         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4207         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4208         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4209         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4210         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4211         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4212         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4213         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4214         0, 0, 0, 0, 0, 0,
4215         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4216         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4217         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4218         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4219         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4220         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4221         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4222         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4223         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4224         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4225         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4226         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4227         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4228         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4229         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4230         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4231         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4232         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4233         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4234         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4235         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4236         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4237         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4238         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4239         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4240         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4241         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4242         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4243         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4244         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4245         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4246         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4247         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4248         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4249         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4250         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4251         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4252         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4253         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4254         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4255         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4256         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4257         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4258         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4259         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4260         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4261         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4262         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4263         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4264         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4265         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4266         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4267         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4268         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4269         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4270         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4271         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4272         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4273         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4274         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4275         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4276         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4277         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4278         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4279         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4280 };
4281
4282 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4283         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4284         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4285         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4286         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4287         0x00000000
4288 };
4289
4290 #if 0 /* All zeros, don't eat up space with it. */
4291 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4292         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4293         0x00000000, 0x00000000, 0x00000000, 0x00000000
4294 };
4295 #endif
4296
4297 #define RX_CPU_SCRATCH_BASE     0x30000
4298 #define RX_CPU_SCRATCH_SIZE     0x04000
4299 #define TX_CPU_SCRATCH_BASE     0x34000
4300 #define TX_CPU_SCRATCH_SIZE     0x04000
4301
4302 /* tp->lock is held. */
4303 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4304 {
4305         int i;
4306
4307         if (offset == TX_CPU_BASE &&
4308             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4309                 BUG();
4310
4311         if (offset == RX_CPU_BASE) {
4312                 for (i = 0; i < 10000; i++) {
4313                         tw32(offset + CPU_STATE, 0xffffffff);
4314                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4315                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4316                                 break;
4317                 }
4318
4319                 tw32(offset + CPU_STATE, 0xffffffff);
4320                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4321                 udelay(10);
4322         } else {
4323                 for (i = 0; i < 10000; i++) {
4324                         tw32(offset + CPU_STATE, 0xffffffff);
4325                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4326                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4327                                 break;
4328                 }
4329         }
4330
4331         if (i >= 10000) {
4332                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4333                        "and %s CPU\n",
4334                        tp->dev->name,
4335                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4336                 return -ENODEV;
4337         }
4338         return 0;
4339 }
4340
4341 struct fw_info {
4342         unsigned int text_base;
4343         unsigned int text_len;
4344         u32 *text_data;
4345         unsigned int rodata_base;
4346         unsigned int rodata_len;
4347         u32 *rodata_data;
4348         unsigned int data_base;
4349         unsigned int data_len;
4350         u32 *data_data;
4351 };
4352
4353 /* tp->lock is held. */
4354 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4355                                  int cpu_scratch_size, struct fw_info *info)
4356 {
4357         int err, i;
4358         u32 orig_tg3_flags = tp->tg3_flags;
4359         void (*write_op)(struct tg3 *, u32, u32);
4360
4361         if (cpu_base == TX_CPU_BASE &&
4362             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4363                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4364                        "TX cpu firmware on %s which is 5705.\n",
4365                        tp->dev->name);
4366                 return -EINVAL;
4367         }
4368
4369         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4370                 write_op = tg3_write_mem;
4371         else
4372                 write_op = tg3_write_indirect_reg32;
4373
4374         /* Force use of PCI config space for indirect register
4375          * write calls.
4376          */
4377         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4378
4379         /* It is possible that bootcode is still loading at this point.
4380          * Get the nvram lock first before halting the cpu.
4381          */
4382         tg3_nvram_lock(tp);
4383         err = tg3_halt_cpu(tp, cpu_base);
4384         tg3_nvram_unlock(tp);
4385         if (err)
4386                 goto out;
4387
4388         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4389                 write_op(tp, cpu_scratch_base + i, 0);
4390         tw32(cpu_base + CPU_STATE, 0xffffffff);
4391         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4392         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4393                 write_op(tp, (cpu_scratch_base +
4394                               (info->text_base & 0xffff) +
4395                               (i * sizeof(u32))),
4396                          (info->text_data ?
4397                           info->text_data[i] : 0));
4398         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4399                 write_op(tp, (cpu_scratch_base +
4400                               (info->rodata_base & 0xffff) +
4401                               (i * sizeof(u32))),
4402                          (info->rodata_data ?
4403                           info->rodata_data[i] : 0));
4404         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4405                 write_op(tp, (cpu_scratch_base +
4406                               (info->data_base & 0xffff) +
4407                               (i * sizeof(u32))),
4408                          (info->data_data ?
4409                           info->data_data[i] : 0));
4410
4411         err = 0;
4412
4413 out:
4414         tp->tg3_flags = orig_tg3_flags;
4415         return err;
4416 }
4417
4418 /* tp->lock is held. */
4419 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4420 {
4421         struct fw_info info;
4422         int err, i;
4423
4424         info.text_base = TG3_FW_TEXT_ADDR;
4425         info.text_len = TG3_FW_TEXT_LEN;
4426         info.text_data = &tg3FwText[0];
4427         info.rodata_base = TG3_FW_RODATA_ADDR;
4428         info.rodata_len = TG3_FW_RODATA_LEN;
4429         info.rodata_data = &tg3FwRodata[0];
4430         info.data_base = TG3_FW_DATA_ADDR;
4431         info.data_len = TG3_FW_DATA_LEN;
4432         info.data_data = NULL;
4433
4434         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4435                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4436                                     &info);
4437         if (err)
4438                 return err;
4439
4440         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4441                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4442                                     &info);
4443         if (err)
4444                 return err;
4445
4446         /* Now startup only the RX cpu. */
4447         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4448         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4449
4450         for (i = 0; i < 5; i++) {
4451                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4452                         break;
4453                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4454                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4455                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4456                 udelay(1000);
4457         }
4458         if (i >= 5) {
4459                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4460                        "to set RX CPU PC, is %08x should be %08x\n",
4461                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4462                        TG3_FW_TEXT_ADDR);
4463                 return -ENODEV;
4464         }
4465         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4466         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4467
4468         return 0;
4469 }
4470
4471 #if TG3_TSO_SUPPORT != 0
4472
4473 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4474 #define TG3_TSO_FW_RELASE_MINOR         0x6
4475 #define TG3_TSO_FW_RELEASE_FIX          0x0
4476 #define TG3_TSO_FW_START_ADDR           0x08000000
4477 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4478 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4479 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4480 #define TG3_TSO_FW_RODATA_LEN           0x60
4481 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4482 #define TG3_TSO_FW_DATA_LEN             0x30
4483 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4484 #define TG3_TSO_FW_SBSS_LEN             0x2c
4485 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4486 #define TG3_TSO_FW_BSS_LEN              0x894
4487
4488 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4489         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4490         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4491         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4492         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4493         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4494         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4495         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4496         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4497         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4498         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4499         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4500         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4501         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4502         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4503         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4504         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4505         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4506         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4507         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4508         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4509         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4510         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4511         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4512         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4513         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4514         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4515         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4516         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4517         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4518         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4519         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4520         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4521         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4522         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4523         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4524         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4525         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4526         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4527         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4528         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4529         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4530         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4531         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4532         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4533         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4534         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4535         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4536         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4537         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4538         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4539         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4540         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4541         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4542         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4543         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4544         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4545         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4546         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4547         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4548         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4549         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4550         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4551         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4552         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4553         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4554         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4555         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4556         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4557         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4558         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4559         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4560         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4561         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4562         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4563         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4564         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4565         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4566         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4567         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4568         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4569         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4570         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4571         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4572         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4573         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4574         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4575         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4576         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4577         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4578         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4579         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4580         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4581         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4582         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4583         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4584         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4585         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4586         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4587         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4588         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4589         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4590         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4591         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4592         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4593         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4594         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4595         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4596         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4597         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4598         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4599         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4600         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4601         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4602         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4603         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4604         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4605         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4606         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4607         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4608         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4609         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4610         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4611         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4612         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4613         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4614         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4615         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4616         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4617         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4618         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4619         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4620         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4621         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4622         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4623         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4624         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4625         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4626         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4627         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4628         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4629         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4630         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4631         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4632         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4633         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4634         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4635         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4636         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4637         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4638         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4639         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4640         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4641         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4642         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4643         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4644         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4645         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4646         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4647         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4648         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4649         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4650         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4651         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4652         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4653         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4654         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4655         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4656         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4657         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4658         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4659         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4660         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4661         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4662         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4663         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4664         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4665         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4666         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4667         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4668         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4669         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4670         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4671         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4672         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4673         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4674         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4675         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4676         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4677         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4678         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4679         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4680         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4681         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4682         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4683         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4684         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4685         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4686         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4687         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4688         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4689         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4690         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4691         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4692         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4693         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4694         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4695         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4696         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4697         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4698         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4699         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4700         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4701         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4702         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4703         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4704         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4705         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4706         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4707         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4708         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4709         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4710         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4711         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4712         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4713         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4714         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4715         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4716         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4717         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4718         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4719         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4720         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4721         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4722         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4723         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4724         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4725         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4726         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4727         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4728         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4729         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4730         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4731         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4732         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4733         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4734         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4735         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4736         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4737         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4738         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4739         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4740         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4741         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4742         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4743         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4744         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4745         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4746         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4747         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4748         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4749         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4750         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4751         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4752         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4753         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4754         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4755         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4756         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4757         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4758         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4759         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4760         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4761         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4762         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4763         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4764         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4765         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4766         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4767         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4768         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4769         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4770         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4771         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4772         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4773 };
4774
4775 static u32 tg3TsoFwRodata[] = {
4776         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4777         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4778         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4779         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4780         0x00000000,
4781 };
4782
4783 static u32 tg3TsoFwData[] = {
4784         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4785         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4786         0x00000000,
4787 };
4788
4789 /* 5705 needs a special version of the TSO firmware.  */
4790 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4791 #define TG3_TSO5_FW_RELASE_MINOR        0x2
4792 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4793 #define TG3_TSO5_FW_START_ADDR          0x00010000
4794 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4795 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4796 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4797 #define TG3_TSO5_FW_RODATA_LEN          0x50
4798 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4799 #define TG3_TSO5_FW_DATA_LEN            0x20
4800 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4801 #define TG3_TSO5_FW_SBSS_LEN            0x28
4802 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4803 #define TG3_TSO5_FW_BSS_LEN             0x88
4804
4805 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4806         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4807         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4808         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4809         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4810         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4811         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4812         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4813         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4814         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4815         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4816         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4817         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4818         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4819         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4820         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4821         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4822         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4823         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4824         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4825         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4826         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4827         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4828         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4829         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4830         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4831         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4832         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4833         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4834         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4835         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4836         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4837         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4838         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4839         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4840         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4841         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4842         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4843         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4844         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4845         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4846         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4847         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4848         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4849         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4850         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4851         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4852         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4853         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4854         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4855         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4856         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4857         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4858         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4859         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4860         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4861         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4862         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4863         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4864         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4865         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4866         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4867         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4868         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4869         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4870         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4871         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4872         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4873         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4874         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4875         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4876         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4877         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4878         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4879         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4880         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4881         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4882         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4883         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4884         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4885         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4886         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4887         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4888         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4889         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4890         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4891         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4892         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4893         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4894         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4895         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4896         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4897         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4898         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4899         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4900         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4901         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4902         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4903         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4904         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4905         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4906         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4907         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4908         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4909         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4910         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4911         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4912         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4913         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4914         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4915         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4916         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4917         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4918         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4919         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4920         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4921         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4922         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4923         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4924         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4925         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4926         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4927         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4928         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4929         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4930         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4931         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4932         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4933         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4934         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4935         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4936         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4937         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4938         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4939         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4940         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4941         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4942         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4943         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4944         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4945         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4946         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4947         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4948         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4949         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4950         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4951         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4952         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4953         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4954         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4955         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4956         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4957         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4958         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4959         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4960         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4961         0x00000000, 0x00000000, 0x00000000,
4962 };
4963
4964 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4965         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4966         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4967         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4968         0x00000000, 0x00000000, 0x00000000,
4969 };
4970
4971 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4972         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4973         0x00000000, 0x00000000, 0x00000000,
4974 };
4975
4976 /* tp->lock is held. */
4977 static int tg3_load_tso_firmware(struct tg3 *tp)
4978 {
4979         struct fw_info info;
4980         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4981         int err, i;
4982
4983         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4984                 return 0;
4985
4986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4987                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4988                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4989                 info.text_data = &tg3Tso5FwText[0];
4990                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4991                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4992                 info.rodata_data = &tg3Tso5FwRodata[0];
4993                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4994                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4995                 info.data_data = &tg3Tso5FwData[0];
4996                 cpu_base = RX_CPU_BASE;
4997                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4998                 cpu_scratch_size = (info.text_len +
4999                                     info.rodata_len +
5000                                     info.data_len +
5001                                     TG3_TSO5_FW_SBSS_LEN +
5002                                     TG3_TSO5_FW_BSS_LEN);
5003         } else {
5004                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5005                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5006                 info.text_data = &tg3TsoFwText[0];
5007                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5008                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5009                 info.rodata_data = &tg3TsoFwRodata[0];
5010                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5011                 info.data_len = TG3_TSO_FW_DATA_LEN;
5012                 info.data_data = &tg3TsoFwData[0];
5013                 cpu_base = TX_CPU_BASE;
5014                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5015                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5016         }
5017
5018         err = tg3_load_firmware_cpu(tp, cpu_base,
5019                                     cpu_scratch_base, cpu_scratch_size,
5020                                     &info);
5021         if (err)
5022                 return err;
5023
5024         /* Now startup the cpu. */
5025         tw32(cpu_base + CPU_STATE, 0xffffffff);
5026         tw32_f(cpu_base + CPU_PC,    info.text_base);
5027
5028         for (i = 0; i < 5; i++) {
5029                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5030                         break;
5031                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5032                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5033                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5034                 udelay(1000);
5035         }
5036         if (i >= 5) {
5037                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5038                        "to set CPU PC, is %08x should be %08x\n",
5039                        tp->dev->name, tr32(cpu_base + CPU_PC),
5040                        info.text_base);
5041                 return -ENODEV;
5042         }
5043         tw32(cpu_base + CPU_STATE, 0xffffffff);
5044         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5045         return 0;
5046 }
5047
5048 #endif /* TG3_TSO_SUPPORT != 0 */
5049
5050 /* tp->lock is held. */
5051 static void __tg3_set_mac_addr(struct tg3 *tp)
5052 {
5053         u32 addr_high, addr_low;
5054         int i;
5055
5056         addr_high = ((tp->dev->dev_addr[0] << 8) |
5057                      tp->dev->dev_addr[1]);
5058         addr_low = ((tp->dev->dev_addr[2] << 24) |
5059                     (tp->dev->dev_addr[3] << 16) |
5060                     (tp->dev->dev_addr[4] <<  8) |
5061                     (tp->dev->dev_addr[5] <<  0));
5062         for (i = 0; i < 4; i++) {
5063                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5064                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5065         }
5066
5067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5068             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5069                 for (i = 0; i < 12; i++) {
5070                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5071                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5072                 }
5073         }
5074
5075         addr_high = (tp->dev->dev_addr[0] +
5076                      tp->dev->dev_addr[1] +
5077                      tp->dev->dev_addr[2] +
5078                      tp->dev->dev_addr[3] +
5079                      tp->dev->dev_addr[4] +
5080                      tp->dev->dev_addr[5]) &
5081                 TX_BACKOFF_SEED_MASK;
5082         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5083 }
5084
5085 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5086 {
5087         struct tg3 *tp = netdev_priv(dev);
5088         struct sockaddr *addr = p;
5089
5090         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5091
5092         spin_lock_irq(&tp->lock);
5093         __tg3_set_mac_addr(tp);
5094         spin_unlock_irq(&tp->lock);
5095
5096         return 0;
5097 }
5098
5099 /* tp->lock is held. */
5100 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5101                            dma_addr_t mapping, u32 maxlen_flags,
5102                            u32 nic_addr)
5103 {
5104         tg3_write_mem(tp,
5105                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5106                       ((u64) mapping >> 32));
5107         tg3_write_mem(tp,
5108                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5109                       ((u64) mapping & 0xffffffff));
5110         tg3_write_mem(tp,
5111                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5112                        maxlen_flags);
5113
5114         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5115                 tg3_write_mem(tp,
5116                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5117                               nic_addr);
5118 }
5119
5120 static void __tg3_set_rx_mode(struct net_device *);
5121 static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5122 {
5123         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5124         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5125         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5126         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5127         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5128                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5129                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5130         }
5131         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5132         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5133         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5134                 u32 val = ec->stats_block_coalesce_usecs;
5135
5136                 if (!netif_carrier_ok(tp->dev))
5137                         val = 0;
5138
5139                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5140         }
5141 }
5142
5143 /* tp->lock is held. */
5144 static int tg3_reset_hw(struct tg3 *tp)
5145 {
5146         u32 val, rdmac_mode;
5147         int i, err, limit;
5148
5149         tg3_disable_ints(tp);
5150
5151         tg3_stop_fw(tp);
5152
5153         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5154
5155         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5156                 tg3_abort_hw(tp, 1);
5157         }
5158
5159         err = tg3_chip_reset(tp);
5160         if (err)
5161                 return err;
5162
5163         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5164
5165         /* This works around an issue with Athlon chipsets on
5166          * B3 tigon3 silicon.  This bit has no effect on any
5167          * other revision.  But do not set this on PCI Express
5168          * chips.
5169          */
5170         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5171                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5172         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5173
5174         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5175             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5176                 val = tr32(TG3PCI_PCISTATE);
5177                 val |= PCISTATE_RETRY_SAME_DMA;
5178                 tw32(TG3PCI_PCISTATE, val);
5179         }
5180
5181         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5182                 /* Enable some hw fixes.  */
5183                 val = tr32(TG3PCI_MSI_DATA);
5184                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5185                 tw32(TG3PCI_MSI_DATA, val);
5186         }
5187
5188         /* Descriptor ring init may make accesses to the
5189          * NIC SRAM area to setup the TX descriptors, so we
5190          * can only do this after the hardware has been
5191          * successfully reset.
5192          */
5193         tg3_init_rings(tp);
5194
5195         /* This value is determined during the probe time DMA
5196          * engine test, tg3_test_dma.
5197          */
5198         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5199
5200         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5201                           GRC_MODE_4X_NIC_SEND_RINGS |
5202                           GRC_MODE_NO_TX_PHDR_CSUM |
5203                           GRC_MODE_NO_RX_PHDR_CSUM);
5204         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5205         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5206                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5207         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5208                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5209
5210         tw32(GRC_MODE,
5211              tp->grc_mode |
5212              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5213
5214         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5215         val = tr32(GRC_MISC_CFG);
5216         val &= ~0xff;
5217         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5218         tw32(GRC_MISC_CFG, val);
5219
5220         /* Initialize MBUF/DESC pool. */
5221         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5222                 /* Do nothing.  */
5223         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5224                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5225                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5226                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5227                 else
5228                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5229                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5230                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5231         }
5232 #if TG3_TSO_SUPPORT != 0
5233         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5234                 int fw_len;
5235
5236                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5237                           TG3_TSO5_FW_RODATA_LEN +
5238                           TG3_TSO5_FW_DATA_LEN +
5239                           TG3_TSO5_FW_SBSS_LEN +
5240                           TG3_TSO5_FW_BSS_LEN);
5241                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5242                 tw32(BUFMGR_MB_POOL_ADDR,
5243                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5244                 tw32(BUFMGR_MB_POOL_SIZE,
5245                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5246         }
5247 #endif
5248
5249         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5250                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5251                      tp->bufmgr_config.mbuf_read_dma_low_water);
5252                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5253                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5254                 tw32(BUFMGR_MB_HIGH_WATER,
5255                      tp->bufmgr_config.mbuf_high_water);
5256         } else {
5257                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5258                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5259                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5260                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5261                 tw32(BUFMGR_MB_HIGH_WATER,
5262                      tp->bufmgr_config.mbuf_high_water_jumbo);
5263         }
5264         tw32(BUFMGR_DMA_LOW_WATER,
5265              tp->bufmgr_config.dma_low_water);
5266         tw32(BUFMGR_DMA_HIGH_WATER,
5267              tp->bufmgr_config.dma_high_water);
5268
5269         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5270         for (i = 0; i < 2000; i++) {
5271                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5272                         break;
5273                 udelay(10);
5274         }
5275         if (i >= 2000) {
5276                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5277                        tp->dev->name);
5278                 return -ENODEV;
5279         }
5280
5281         /* Setup replenish threshold. */
5282         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5283
5284         /* Initialize TG3_BDINFO's at:
5285          *  RCVDBDI_STD_BD:     standard eth size rx ring
5286          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5287          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5288          *
5289          * like so:
5290          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5291          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5292          *                              ring attribute flags
5293          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5294          *
5295          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5296          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5297          *
5298          * The size of each ring is fixed in the firmware, but the location is
5299          * configurable.
5300          */
5301         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5302              ((u64) tp->rx_std_mapping >> 32));
5303         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5304              ((u64) tp->rx_std_mapping & 0xffffffff));
5305         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5306              NIC_SRAM_RX_BUFFER_DESC);
5307
5308         /* Don't even try to program the JUMBO/MINI buffer descriptor
5309          * configs on 5705.
5310          */
5311         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5312                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5313                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5314         } else {
5315                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5316                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5317
5318                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5319                      BDINFO_FLAGS_DISABLED);
5320
5321                 /* Setup replenish threshold. */
5322                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5323
5324                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5325                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5326                              ((u64) tp->rx_jumbo_mapping >> 32));
5327                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5328                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5329                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5330                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5331                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5332                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5333                 } else {
5334                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5335                              BDINFO_FLAGS_DISABLED);
5336                 }
5337
5338         }
5339
5340         /* There is only one send ring on 5705/5750, no need to explicitly
5341          * disable the others.
5342          */
5343         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5344                 /* Clear out send RCB ring in SRAM. */
5345                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5346                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5347                                       BDINFO_FLAGS_DISABLED);
5348         }
5349
5350         tp->tx_prod = 0;
5351         tp->tx_cons = 0;
5352         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5353         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5354
5355         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5356                        tp->tx_desc_mapping,
5357                        (TG3_TX_RING_SIZE <<
5358                         BDINFO_FLAGS_MAXLEN_SHIFT),
5359                        NIC_SRAM_TX_BUFFER_DESC);
5360
5361         /* There is only one receive return ring on 5705/5750, no need
5362          * to explicitly disable the others.
5363          */
5364         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5365                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5366                      i += TG3_BDINFO_SIZE) {
5367                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5368                                       BDINFO_FLAGS_DISABLED);
5369                 }
5370         }
5371
5372         tp->rx_rcb_ptr = 0;
5373         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5374
5375         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5376                        tp->rx_rcb_mapping,
5377                        (TG3_RX_RCB_RING_SIZE(tp) <<
5378                         BDINFO_FLAGS_MAXLEN_SHIFT),
5379                        0);
5380
5381         tp->rx_std_ptr = tp->rx_pending;
5382         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5383                      tp->rx_std_ptr);
5384
5385         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5386                                                 tp->rx_jumbo_pending : 0;
5387         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5388                      tp->rx_jumbo_ptr);
5389
5390         /* Initialize MAC address and backoff seed. */
5391         __tg3_set_mac_addr(tp);
5392
5393         /* MTU + ethernet header + FCS + optional VLAN tag */
5394         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5395
5396         /* The slot time is changed by tg3_setup_phy if we
5397          * run at gigabit with half duplex.
5398          */
5399         tw32(MAC_TX_LENGTHS,
5400              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5401              (6 << TX_LENGTHS_IPG_SHIFT) |
5402              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5403
5404         /* Receive rules. */
5405         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5406         tw32(RCVLPC_CONFIG, 0x0181);
5407
5408         /* Calculate RDMAC_MODE setting early, we need it to determine
5409          * the RCVLPC_STATE_ENABLE mask.
5410          */
5411         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5412                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5413                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5414                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5415                       RDMAC_MODE_LNGREAD_ENAB);
5416         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5417                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5418
5419         /* If statement applies to 5705 and 5750 PCI devices only */
5420         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5421              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5422             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5423                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5424                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5425                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5426                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5427                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5428                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5429                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5430                 }
5431         }
5432
5433         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5434                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5435
5436 #if TG3_TSO_SUPPORT != 0
5437         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5438                 rdmac_mode |= (1 << 27);
5439 #endif
5440
5441         /* Receive/send statistics. */
5442         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5443             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5444                 val = tr32(RCVLPC_STATS_ENABLE);
5445                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5446                 tw32(RCVLPC_STATS_ENABLE, val);
5447         } else {
5448                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5449         }
5450         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5451         tw32(SNDDATAI_STATSENAB, 0xffffff);
5452         tw32(SNDDATAI_STATSCTRL,
5453              (SNDDATAI_SCTRL_ENABLE |
5454               SNDDATAI_SCTRL_FASTUPD));
5455
5456         /* Setup host coalescing engine. */
5457         tw32(HOSTCC_MODE, 0);
5458         for (i = 0; i < 2000; i++) {
5459                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5460                         break;
5461                 udelay(10);
5462         }
5463
5464         tg3_set_coalesce(tp, &tp->coal);
5465
5466         /* set status block DMA address */
5467         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5468              ((u64) tp->status_mapping >> 32));
5469         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5470              ((u64) tp->status_mapping & 0xffffffff));
5471
5472         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5473                 /* Status/statistics block address.  See tg3_timer,
5474                  * the tg3_periodic_fetch_stats call there, and
5475                  * tg3_get_stats to see how this works for 5705/5750 chips.
5476                  */
5477                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5478                      ((u64) tp->stats_mapping >> 32));
5479                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5480                      ((u64) tp->stats_mapping & 0xffffffff));
5481                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5482                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5483         }
5484
5485         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5486
5487         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5488         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5489         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5490                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5491
5492         /* Clear statistics/status block in chip, and status block in ram. */
5493         for (i = NIC_SRAM_STATS_BLK;
5494              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5495              i += sizeof(u32)) {
5496                 tg3_write_mem(tp, i, 0);
5497                 udelay(40);
5498         }
5499         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5500
5501         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5502                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5503         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5504         udelay(40);
5505
5506         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5507          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5508          * register to preserve the GPIO settings for LOMs. The GPIOs,
5509          * whether used as inputs or outputs, are set by boot code after
5510          * reset.
5511          */
5512         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5513                 u32 gpio_mask;
5514
5515                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5516                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5517
5518                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5519                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5520                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5521
5522                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5523
5524                 /* GPIO1 must be driven high for eeprom write protect */
5525                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5526                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5527         }
5528         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5529         udelay(100);
5530
5531         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5532         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5533         tp->last_tag = 0;
5534
5535         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5536                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5537                 udelay(40);
5538         }
5539
5540         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5541                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5542                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5543                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5544                WDMAC_MODE_LNGREAD_ENAB);
5545
5546         /* If statement applies to 5705 and 5750 PCI devices only */
5547         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5548              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5550                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5551                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5552                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5553                         /* nothing */
5554                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5555                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5556                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5557                         val |= WDMAC_MODE_RX_ACCEL;
5558                 }
5559         }
5560
5561         tw32_f(WDMAC_MODE, val);
5562         udelay(40);
5563
5564         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5565                 val = tr32(TG3PCI_X_CAPS);
5566                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5567                         val &= ~PCIX_CAPS_BURST_MASK;
5568                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5569                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5570                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5571                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5572                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5573                                 val |= (tp->split_mode_max_reqs <<
5574                                         PCIX_CAPS_SPLIT_SHIFT);
5575                 }
5576                 tw32(TG3PCI_X_CAPS, val);
5577         }
5578
5579         tw32_f(RDMAC_MODE, rdmac_mode);
5580         udelay(40);
5581
5582         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5583         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5584                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5585         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5586         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5587         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5588         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5589         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5590 #if TG3_TSO_SUPPORT != 0
5591         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5592                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5593 #endif
5594         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5595         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5596
5597         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5598                 err = tg3_load_5701_a0_firmware_fix(tp);
5599                 if (err)
5600                         return err;
5601         }
5602
5603 #if TG3_TSO_SUPPORT != 0
5604         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5605                 err = tg3_load_tso_firmware(tp);
5606                 if (err)
5607                         return err;
5608         }
5609 #endif
5610
5611         tp->tx_mode = TX_MODE_ENABLE;
5612         tw32_f(MAC_TX_MODE, tp->tx_mode);
5613         udelay(100);
5614
5615         tp->rx_mode = RX_MODE_ENABLE;
5616         tw32_f(MAC_RX_MODE, tp->rx_mode);
5617         udelay(10);
5618
5619         if (tp->link_config.phy_is_low_power) {
5620                 tp->link_config.phy_is_low_power = 0;
5621                 tp->link_config.speed = tp->link_config.orig_speed;
5622                 tp->link_config.duplex = tp->link_config.orig_duplex;
5623                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5624         }
5625
5626         tp->mi_mode = MAC_MI_MODE_BASE;
5627         tw32_f(MAC_MI_MODE, tp->mi_mode);
5628         udelay(80);
5629
5630         tw32(MAC_LED_CTRL, tp->led_ctrl);
5631
5632         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5633         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5634                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5635                 udelay(10);
5636         }
5637         tw32_f(MAC_RX_MODE, tp->rx_mode);
5638         udelay(10);
5639
5640         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5641                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5642                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5643                         /* Set drive transmission level to 1.2V  */
5644                         /* only if the signal pre-emphasis bit is not set  */
5645                         val = tr32(MAC_SERDES_CFG);
5646                         val &= 0xfffff000;
5647                         val |= 0x880;
5648                         tw32(MAC_SERDES_CFG, val);
5649                 }
5650                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5651                         tw32(MAC_SERDES_CFG, 0x616000);
5652         }
5653
5654         /* Prevent chip from dropping frames when flow control
5655          * is enabled.
5656          */
5657         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5658
5659         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5660             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5661                 /* Use hardware link auto-negotiation */
5662                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5663         }
5664
5665         err = tg3_setup_phy(tp, 1);
5666         if (err)
5667                 return err;
5668
5669         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5670                 u32 tmp;
5671
5672                 /* Clear CRC stats. */
5673                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5674                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5675                         tg3_readphy(tp, 0x14, &tmp);
5676                 }
5677         }
5678
5679         __tg3_set_rx_mode(tp->dev);
5680
5681         /* Initialize receive rules. */
5682         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5683         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5684         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5685         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5686
5687         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5688                 limit = 8;
5689         else
5690                 limit = 16;
5691         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5692                 limit -= 4;
5693         switch (limit) {
5694         case 16:
5695                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5696         case 15:
5697                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5698         case 14:
5699                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5700         case 13:
5701                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5702         case 12:
5703                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5704         case 11:
5705                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5706         case 10:
5707                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5708         case 9:
5709                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5710         case 8:
5711                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5712         case 7:
5713                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5714         case 6:
5715                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5716         case 5:
5717                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5718         case 4:
5719                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5720         case 3:
5721                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5722         case 2:
5723         case 1:
5724
5725         default:
5726                 break;
5727         };
5728
5729         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5730
5731         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5732                 tg3_enable_ints(tp);
5733
5734         return 0;
5735 }
5736
5737 /* Called at device open time to get the chip ready for
5738  * packet processing.  Invoked with tp->lock held.
5739  */
5740 static int tg3_init_hw(struct tg3 *tp)
5741 {
5742         int err;
5743
5744         /* Force the chip into D0. */
5745         err = tg3_set_power_state(tp, 0);
5746         if (err)
5747                 goto out;
5748
5749         tg3_switch_clocks(tp);
5750
5751         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5752
5753         err = tg3_reset_hw(tp);
5754
5755 out:
5756         return err;
5757 }
5758
5759 #define TG3_STAT_ADD32(PSTAT, REG) \
5760 do {    u32 __val = tr32(REG); \
5761         (PSTAT)->low += __val; \
5762         if ((PSTAT)->low < __val) \
5763                 (PSTAT)->high += 1; \
5764 } while (0)
5765
5766 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5767 {
5768         struct tg3_hw_stats *sp = tp->hw_stats;
5769
5770         if (!netif_carrier_ok(tp->dev))
5771                 return;
5772
5773         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5774         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5775         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5776         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5777         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5778         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5779         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5780         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5781         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5782         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5783         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5784         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5785         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5786
5787         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5788         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5789         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5790         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5791         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5792         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5793         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5794         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5795         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5796         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5797         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5798         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5799         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5800         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5801 }
5802
5803 static void tg3_timer(unsigned long __opaque)
5804 {
5805         struct tg3 *tp = (struct tg3 *) __opaque;
5806         unsigned long flags;
5807
5808         spin_lock_irqsave(&tp->lock, flags);
5809         spin_lock(&tp->tx_lock);
5810
5811         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
5812                 /* All of this garbage is because when using non-tagged
5813                  * IRQ status the mailbox/status_block protocol the chip
5814                  * uses with the cpu is race prone.
5815                  */
5816                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5817                         tw32(GRC_LOCAL_CTRL,
5818                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5819                 } else {
5820                         tw32(HOSTCC_MODE, tp->coalesce_mode |
5821                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5822                 }
5823
5824                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5825                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5826                         spin_unlock(&tp->tx_lock);
5827                         spin_unlock_irqrestore(&tp->lock, flags);
5828                         schedule_work(&tp->reset_task);
5829                         return;
5830                 }
5831         }
5832
5833         /* This part only runs once per second. */
5834         if (!--tp->timer_counter) {
5835                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5836                         tg3_periodic_fetch_stats(tp);
5837
5838                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5839                         u32 mac_stat;
5840                         int phy_event;
5841
5842                         mac_stat = tr32(MAC_STATUS);
5843
5844                         phy_event = 0;
5845                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5846                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5847                                         phy_event = 1;
5848                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5849                                 phy_event = 1;
5850
5851                         if (phy_event)
5852                                 tg3_setup_phy(tp, 0);
5853                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5854                         u32 mac_stat = tr32(MAC_STATUS);
5855                         int need_setup = 0;
5856
5857                         if (netif_carrier_ok(tp->dev) &&
5858                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5859                                 need_setup = 1;
5860                         }
5861                         if (! netif_carrier_ok(tp->dev) &&
5862                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5863                                          MAC_STATUS_SIGNAL_DET))) {
5864                                 need_setup = 1;
5865                         }
5866                         if (need_setup) {
5867                                 tw32_f(MAC_MODE,
5868                                      (tp->mac_mode &
5869                                       ~MAC_MODE_PORT_MODE_MASK));
5870                                 udelay(40);
5871                                 tw32_f(MAC_MODE, tp->mac_mode);
5872                                 udelay(40);
5873                                 tg3_setup_phy(tp, 0);
5874                         }
5875                 }
5876
5877                 tp->timer_counter = tp->timer_multiplier;
5878         }
5879
5880         /* Heartbeat is only sent once every 120 seconds.  */
5881         if (!--tp->asf_counter) {
5882                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5883                         u32 val;
5884
5885                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5886                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5887                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5888                         val = tr32(GRC_RX_CPU_EVENT);
5889                         val |= (1 << 14);
5890                         tw32(GRC_RX_CPU_EVENT, val);
5891                 }
5892                 tp->asf_counter = tp->asf_multiplier;
5893         }
5894
5895         spin_unlock(&tp->tx_lock);
5896         spin_unlock_irqrestore(&tp->lock, flags);
5897
5898         tp->timer.expires = jiffies + tp->timer_offset;
5899         add_timer(&tp->timer);
5900 }
5901
5902 static int tg3_test_interrupt(struct tg3 *tp)
5903 {
5904         struct net_device *dev = tp->dev;
5905         int err, i;
5906         u32 int_mbox = 0;
5907
5908         if (!netif_running(dev))
5909                 return -ENODEV;
5910
5911         tg3_disable_ints(tp);
5912
5913         free_irq(tp->pdev->irq, dev);
5914
5915         err = request_irq(tp->pdev->irq, tg3_test_isr,
5916                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5917         if (err)
5918                 return err;
5919
5920         tg3_enable_ints(tp);
5921
5922         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5923                HOSTCC_MODE_NOW);
5924
5925         for (i = 0; i < 5; i++) {
5926                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5927                 if (int_mbox != 0)
5928                         break;
5929                 msleep(10);
5930         }
5931
5932         tg3_disable_ints(tp);
5933
5934         free_irq(tp->pdev->irq, dev);
5935         
5936         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5937                 err = request_irq(tp->pdev->irq, tg3_msi,
5938                                   SA_SAMPLE_RANDOM, dev->name, dev);
5939         else {
5940                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
5941                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5942                         fn = tg3_interrupt_tagged;
5943                 err = request_irq(tp->pdev->irq, fn,
5944                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5945         }
5946
5947         if (err)
5948                 return err;
5949
5950         if (int_mbox != 0)
5951                 return 0;
5952
5953         return -EIO;
5954 }
5955
5956 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
5957  * successfully restored
5958  */
5959 static int tg3_test_msi(struct tg3 *tp)
5960 {
5961         struct net_device *dev = tp->dev;
5962         int err;
5963         u16 pci_cmd;
5964
5965         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5966                 return 0;
5967
5968         /* Turn off SERR reporting in case MSI terminates with Master
5969          * Abort.
5970          */
5971         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5972         pci_write_config_word(tp->pdev, PCI_COMMAND,
5973                               pci_cmd & ~PCI_COMMAND_SERR);
5974
5975         err = tg3_test_interrupt(tp);
5976
5977         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5978
5979         if (!err)
5980                 return 0;
5981
5982         /* other failures */
5983         if (err != -EIO)
5984                 return err;
5985
5986         /* MSI test failed, go back to INTx mode */
5987         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
5988                "switching to INTx mode. Please report this failure to "
5989                "the PCI maintainer and include system chipset information.\n",
5990                        tp->dev->name);
5991
5992         free_irq(tp->pdev->irq, dev);
5993         pci_disable_msi(tp->pdev);
5994
5995         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5996
5997         {
5998                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
5999                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6000                         fn = tg3_interrupt_tagged;
6001
6002                 err = request_irq(tp->pdev->irq, fn,
6003                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6004         }
6005         if (err)
6006                 return err;
6007
6008         /* Need to reset the chip because the MSI cycle may have terminated
6009          * with Master Abort.
6010          */
6011         spin_lock_irq(&tp->lock);
6012         spin_lock(&tp->tx_lock);
6013
6014         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6015         err = tg3_init_hw(tp);
6016
6017         spin_unlock(&tp->tx_lock);
6018         spin_unlock_irq(&tp->lock);
6019
6020         if (err)
6021                 free_irq(tp->pdev->irq, dev);
6022
6023         return err;
6024 }
6025
6026 static int tg3_open(struct net_device *dev)
6027 {
6028         struct tg3 *tp = netdev_priv(dev);
6029         int err;
6030
6031         spin_lock_irq(&tp->lock);
6032         spin_lock(&tp->tx_lock);
6033
6034         tg3_disable_ints(tp);
6035         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6036
6037         spin_unlock(&tp->tx_lock);
6038         spin_unlock_irq(&tp->lock);
6039
6040         /* The placement of this call is tied
6041          * to the setup and use of Host TX descriptors.
6042          */
6043         err = tg3_alloc_consistent(tp);
6044         if (err)
6045                 return err;
6046
6047         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6048             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6049             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6050                 /* All MSI supporting chips should support tagged
6051                  * status.  Assert that this is the case.
6052                  */
6053                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6054                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6055                                "Not using MSI.\n", tp->dev->name);
6056                 } else if (pci_enable_msi(tp->pdev) == 0) {
6057                         u32 msi_mode;
6058
6059                         msi_mode = tr32(MSGINT_MODE);
6060                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6061                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6062                 }
6063         }
6064         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6065                 err = request_irq(tp->pdev->irq, tg3_msi,
6066                                   SA_SAMPLE_RANDOM, dev->name, dev);
6067         else {
6068                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6069                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6070                         fn = tg3_interrupt_tagged;
6071
6072                 err = request_irq(tp->pdev->irq, fn,
6073                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6074         }
6075
6076         if (err) {
6077                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6078                         pci_disable_msi(tp->pdev);
6079                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6080                 }
6081                 tg3_free_consistent(tp);
6082                 return err;
6083         }
6084
6085         spin_lock_irq(&tp->lock);
6086         spin_lock(&tp->tx_lock);
6087
6088         err = tg3_init_hw(tp);
6089         if (err) {
6090                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6091                 tg3_free_rings(tp);
6092         } else {
6093                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6094                         tp->timer_offset = HZ;
6095                 else
6096                         tp->timer_offset = HZ / 10;
6097
6098                 BUG_ON(tp->timer_offset > HZ);
6099                 tp->timer_counter = tp->timer_multiplier =
6100                         (HZ / tp->timer_offset);
6101                 tp->asf_counter = tp->asf_multiplier =
6102                         ((HZ / tp->timer_offset) * 120);
6103
6104                 init_timer(&tp->timer);
6105                 tp->timer.expires = jiffies + tp->timer_offset;
6106                 tp->timer.data = (unsigned long) tp;
6107                 tp->timer.function = tg3_timer;
6108         }
6109
6110         spin_unlock(&tp->tx_lock);
6111         spin_unlock_irq(&tp->lock);
6112
6113         if (err) {
6114                 free_irq(tp->pdev->irq, dev);
6115                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6116                         pci_disable_msi(tp->pdev);
6117                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6118                 }
6119                 tg3_free_consistent(tp);
6120                 return err;
6121         }
6122
6123         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6124                 err = tg3_test_msi(tp);
6125
6126                 if (err) {
6127                         spin_lock_irq(&tp->lock);
6128                         spin_lock(&tp->tx_lock);
6129
6130                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6131                                 pci_disable_msi(tp->pdev);
6132                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6133                         }
6134                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6135                         tg3_free_rings(tp);
6136                         tg3_free_consistent(tp);
6137
6138                         spin_unlock(&tp->tx_lock);
6139                         spin_unlock_irq(&tp->lock);
6140
6141                         return err;
6142                 }
6143         }
6144
6145         spin_lock_irq(&tp->lock);
6146         spin_lock(&tp->tx_lock);
6147
6148         add_timer(&tp->timer);
6149         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6150         tg3_enable_ints(tp);
6151
6152         spin_unlock(&tp->tx_lock);
6153         spin_unlock_irq(&tp->lock);
6154
6155         netif_start_queue(dev);
6156
6157         return 0;
6158 }
6159
6160 #if 0
6161 /*static*/ void tg3_dump_state(struct tg3 *tp)
6162 {
6163         u32 val32, val32_2, val32_3, val32_4, val32_5;
6164         u16 val16;
6165         int i;
6166
6167         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6168         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6169         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6170                val16, val32);
6171
6172         /* MAC block */
6173         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6174                tr32(MAC_MODE), tr32(MAC_STATUS));
6175         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6176                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6177         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6178                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6179         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6180                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6181
6182         /* Send data initiator control block */
6183         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6184                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6185         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6186                tr32(SNDDATAI_STATSCTRL));
6187
6188         /* Send data completion control block */
6189         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6190
6191         /* Send BD ring selector block */
6192         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6193                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6194
6195         /* Send BD initiator control block */
6196         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6197                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6198
6199         /* Send BD completion control block */
6200         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6201
6202         /* Receive list placement control block */
6203         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6204                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6205         printk("       RCVLPC_STATSCTRL[%08x]\n",
6206                tr32(RCVLPC_STATSCTRL));
6207
6208         /* Receive data and receive BD initiator control block */
6209         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6210                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6211
6212         /* Receive data completion control block */
6213         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6214                tr32(RCVDCC_MODE));
6215
6216         /* Receive BD initiator control block */
6217         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6218                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6219
6220         /* Receive BD completion control block */
6221         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6222                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6223
6224         /* Receive list selector control block */
6225         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6226                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6227
6228         /* Mbuf cluster free block */
6229         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6230                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6231
6232         /* Host coalescing control block */
6233         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6234                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6235         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6236                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6237                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6238         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6239                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6240                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6241         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6242                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6243         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6244                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6245
6246         /* Memory arbiter control block */
6247         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6248                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6249
6250         /* Buffer manager control block */
6251         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6252                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6253         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6254                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6255         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6256                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6257                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6258                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6259
6260         /* Read DMA control block */
6261         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6262                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6263
6264         /* Write DMA control block */
6265         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6266                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6267
6268         /* DMA completion block */
6269         printk("DEBUG: DMAC_MODE[%08x]\n",
6270                tr32(DMAC_MODE));
6271
6272         /* GRC block */
6273         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6274                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6275         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6276                tr32(GRC_LOCAL_CTRL));
6277
6278         /* TG3_BDINFOs */
6279         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6280                tr32(RCVDBDI_JUMBO_BD + 0x0),
6281                tr32(RCVDBDI_JUMBO_BD + 0x4),
6282                tr32(RCVDBDI_JUMBO_BD + 0x8),
6283                tr32(RCVDBDI_JUMBO_BD + 0xc));
6284         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6285                tr32(RCVDBDI_STD_BD + 0x0),
6286                tr32(RCVDBDI_STD_BD + 0x4),
6287                tr32(RCVDBDI_STD_BD + 0x8),
6288                tr32(RCVDBDI_STD_BD + 0xc));
6289         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6290                tr32(RCVDBDI_MINI_BD + 0x0),
6291                tr32(RCVDBDI_MINI_BD + 0x4),
6292                tr32(RCVDBDI_MINI_BD + 0x8),
6293                tr32(RCVDBDI_MINI_BD + 0xc));
6294
6295         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6296         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6297         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6298         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6299         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6300                val32, val32_2, val32_3, val32_4);
6301
6302         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6303         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6304         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6305         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6306         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6307                val32, val32_2, val32_3, val32_4);
6308
6309         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6310         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6311         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6312         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6313         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6314         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6315                val32, val32_2, val32_3, val32_4, val32_5);
6316
6317         /* SW status block */
6318         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6319                tp->hw_status->status,
6320                tp->hw_status->status_tag,
6321                tp->hw_status->rx_jumbo_consumer,
6322                tp->hw_status->rx_consumer,
6323                tp->hw_status->rx_mini_consumer,
6324                tp->hw_status->idx[0].rx_producer,
6325                tp->hw_status->idx[0].tx_consumer);
6326
6327         /* SW statistics block */
6328         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6329                ((u32 *)tp->hw_stats)[0],
6330                ((u32 *)tp->hw_stats)[1],
6331                ((u32 *)tp->hw_stats)[2],
6332                ((u32 *)tp->hw_stats)[3]);
6333
6334         /* Mailboxes */
6335         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6336                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6337                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6338                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6339                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6340
6341         /* NIC side send descriptors. */
6342         for (i = 0; i < 6; i++) {
6343                 unsigned long txd;
6344
6345                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6346                         + (i * sizeof(struct tg3_tx_buffer_desc));
6347                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6348                        i,
6349                        readl(txd + 0x0), readl(txd + 0x4),
6350                        readl(txd + 0x8), readl(txd + 0xc));
6351         }
6352
6353         /* NIC side RX descriptors. */
6354         for (i = 0; i < 6; i++) {
6355                 unsigned long rxd;
6356
6357                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6358                         + (i * sizeof(struct tg3_rx_buffer_desc));
6359                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6360                        i,
6361                        readl(rxd + 0x0), readl(rxd + 0x4),
6362                        readl(rxd + 0x8), readl(rxd + 0xc));
6363                 rxd += (4 * sizeof(u32));
6364                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6365                        i,
6366                        readl(rxd + 0x0), readl(rxd + 0x4),
6367                        readl(rxd + 0x8), readl(rxd + 0xc));
6368         }
6369
6370         for (i = 0; i < 6; i++) {
6371                 unsigned long rxd;
6372
6373                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6374                         + (i * sizeof(struct tg3_rx_buffer_desc));
6375                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6376                        i,
6377                        readl(rxd + 0x0), readl(rxd + 0x4),
6378                        readl(rxd + 0x8), readl(rxd + 0xc));
6379                 rxd += (4 * sizeof(u32));
6380                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6381                        i,
6382                        readl(rxd + 0x0), readl(rxd + 0x4),
6383                        readl(rxd + 0x8), readl(rxd + 0xc));
6384         }
6385 }
6386 #endif
6387
6388 static struct net_device_stats *tg3_get_stats(struct net_device *);
6389 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6390
6391 static int tg3_close(struct net_device *dev)
6392 {
6393         struct tg3 *tp = netdev_priv(dev);
6394
6395         netif_stop_queue(dev);
6396
6397         del_timer_sync(&tp->timer);
6398
6399         spin_lock_irq(&tp->lock);
6400         spin_lock(&tp->tx_lock);
6401 #if 0
6402         tg3_dump_state(tp);
6403 #endif
6404
6405         tg3_disable_ints(tp);
6406
6407         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6408         tg3_free_rings(tp);
6409         tp->tg3_flags &=
6410                 ~(TG3_FLAG_INIT_COMPLETE |
6411                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6412         netif_carrier_off(tp->dev);
6413
6414         spin_unlock(&tp->tx_lock);
6415         spin_unlock_irq(&tp->lock);
6416
6417         free_irq(tp->pdev->irq, dev);
6418         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6419                 pci_disable_msi(tp->pdev);
6420                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6421         }
6422
6423         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6424                sizeof(tp->net_stats_prev));
6425         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6426                sizeof(tp->estats_prev));
6427
6428         tg3_free_consistent(tp);
6429
6430         return 0;
6431 }
6432
6433 static inline unsigned long get_stat64(tg3_stat64_t *val)
6434 {
6435         unsigned long ret;
6436
6437 #if (BITS_PER_LONG == 32)
6438         ret = val->low;
6439 #else
6440         ret = ((u64)val->high << 32) | ((u64)val->low);
6441 #endif
6442         return ret;
6443 }
6444
6445 static unsigned long calc_crc_errors(struct tg3 *tp)
6446 {
6447         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6448
6449         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6450             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6451              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6452                 unsigned long flags;
6453                 u32 val;
6454
6455                 spin_lock_irqsave(&tp->lock, flags);
6456                 if (!tg3_readphy(tp, 0x1e, &val)) {
6457                         tg3_writephy(tp, 0x1e, val | 0x8000);
6458                         tg3_readphy(tp, 0x14, &val);
6459                 } else
6460                         val = 0;
6461                 spin_unlock_irqrestore(&tp->lock, flags);
6462
6463                 tp->phy_crc_errors += val;
6464
6465                 return tp->phy_crc_errors;
6466         }
6467
6468         return get_stat64(&hw_stats->rx_fcs_errors);
6469 }
6470
6471 #define ESTAT_ADD(member) \
6472         estats->member =        old_estats->member + \
6473                                 get_stat64(&hw_stats->member)
6474
6475 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6476 {
6477         struct tg3_ethtool_stats *estats = &tp->estats;
6478         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6479         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6480
6481         if (!hw_stats)
6482                 return old_estats;
6483
6484         ESTAT_ADD(rx_octets);
6485         ESTAT_ADD(rx_fragments);
6486         ESTAT_ADD(rx_ucast_packets);
6487         ESTAT_ADD(rx_mcast_packets);
6488         ESTAT_ADD(rx_bcast_packets);
6489         ESTAT_ADD(rx_fcs_errors);
6490         ESTAT_ADD(rx_align_errors);
6491         ESTAT_ADD(rx_xon_pause_rcvd);
6492         ESTAT_ADD(rx_xoff_pause_rcvd);
6493         ESTAT_ADD(rx_mac_ctrl_rcvd);
6494         ESTAT_ADD(rx_xoff_entered);
6495         ESTAT_ADD(rx_frame_too_long_errors);
6496         ESTAT_ADD(rx_jabbers);
6497         ESTAT_ADD(rx_undersize_packets);
6498         ESTAT_ADD(rx_in_length_errors);
6499         ESTAT_ADD(rx_out_length_errors);
6500         ESTAT_ADD(rx_64_or_less_octet_packets);
6501         ESTAT_ADD(rx_65_to_127_octet_packets);
6502         ESTAT_ADD(rx_128_to_255_octet_packets);
6503         ESTAT_ADD(rx_256_to_511_octet_packets);
6504         ESTAT_ADD(rx_512_to_1023_octet_packets);
6505         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6506         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6507         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6508         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6509         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6510
6511         ESTAT_ADD(tx_octets);
6512         ESTAT_ADD(tx_collisions);
6513         ESTAT_ADD(tx_xon_sent);
6514         ESTAT_ADD(tx_xoff_sent);
6515         ESTAT_ADD(tx_flow_control);
6516         ESTAT_ADD(tx_mac_errors);
6517         ESTAT_ADD(tx_single_collisions);
6518         ESTAT_ADD(tx_mult_collisions);
6519         ESTAT_ADD(tx_deferred);
6520         ESTAT_ADD(tx_excessive_collisions);
6521         ESTAT_ADD(tx_late_collisions);
6522         ESTAT_ADD(tx_collide_2times);
6523         ESTAT_ADD(tx_collide_3times);
6524         ESTAT_ADD(tx_collide_4times);
6525         ESTAT_ADD(tx_collide_5times);
6526         ESTAT_ADD(tx_collide_6times);
6527         ESTAT_ADD(tx_collide_7times);
6528         ESTAT_ADD(tx_collide_8times);
6529         ESTAT_ADD(tx_collide_9times);
6530         ESTAT_ADD(tx_collide_10times);
6531         ESTAT_ADD(tx_collide_11times);
6532         ESTAT_ADD(tx_collide_12times);
6533         ESTAT_ADD(tx_collide_13times);
6534         ESTAT_ADD(tx_collide_14times);
6535         ESTAT_ADD(tx_collide_15times);
6536         ESTAT_ADD(tx_ucast_packets);
6537         ESTAT_ADD(tx_mcast_packets);
6538         ESTAT_ADD(tx_bcast_packets);
6539         ESTAT_ADD(tx_carrier_sense_errors);
6540         ESTAT_ADD(tx_discards);
6541         ESTAT_ADD(tx_errors);
6542
6543         ESTAT_ADD(dma_writeq_full);
6544         ESTAT_ADD(dma_write_prioq_full);
6545         ESTAT_ADD(rxbds_empty);
6546         ESTAT_ADD(rx_discards);
6547         ESTAT_ADD(rx_errors);
6548         ESTAT_ADD(rx_threshold_hit);
6549
6550         ESTAT_ADD(dma_readq_full);
6551         ESTAT_ADD(dma_read_prioq_full);
6552         ESTAT_ADD(tx_comp_queue_full);
6553
6554         ESTAT_ADD(ring_set_send_prod_index);
6555         ESTAT_ADD(ring_status_update);
6556         ESTAT_ADD(nic_irqs);
6557         ESTAT_ADD(nic_avoided_irqs);
6558         ESTAT_ADD(nic_tx_threshold_hit);
6559
6560         return estats;
6561 }
6562
6563 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6564 {
6565         struct tg3 *tp = netdev_priv(dev);
6566         struct net_device_stats *stats = &tp->net_stats;
6567         struct net_device_stats *old_stats = &tp->net_stats_prev;
6568         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6569
6570         if (!hw_stats)
6571                 return old_stats;
6572
6573         stats->rx_packets = old_stats->rx_packets +
6574                 get_stat64(&hw_stats->rx_ucast_packets) +
6575                 get_stat64(&hw_stats->rx_mcast_packets) +
6576                 get_stat64(&hw_stats->rx_bcast_packets);
6577                 
6578         stats->tx_packets = old_stats->tx_packets +
6579                 get_stat64(&hw_stats->tx_ucast_packets) +
6580                 get_stat64(&hw_stats->tx_mcast_packets) +
6581                 get_stat64(&hw_stats->tx_bcast_packets);
6582
6583         stats->rx_bytes = old_stats->rx_bytes +
6584                 get_stat64(&hw_stats->rx_octets);
6585         stats->tx_bytes = old_stats->tx_bytes +
6586                 get_stat64(&hw_stats->tx_octets);
6587
6588         stats->rx_errors = old_stats->rx_errors +
6589                 get_stat64(&hw_stats->rx_errors) +
6590                 get_stat64(&hw_stats->rx_discards);
6591         stats->tx_errors = old_stats->tx_errors +
6592                 get_stat64(&hw_stats->tx_errors) +
6593                 get_stat64(&hw_stats->tx_mac_errors) +
6594                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6595                 get_stat64(&hw_stats->tx_discards);
6596
6597         stats->multicast = old_stats->multicast +
6598                 get_stat64(&hw_stats->rx_mcast_packets);
6599         stats->collisions = old_stats->collisions +
6600                 get_stat64(&hw_stats->tx_collisions);
6601
6602         stats->rx_length_errors = old_stats->rx_length_errors +
6603                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6604                 get_stat64(&hw_stats->rx_undersize_packets);
6605
6606         stats->rx_over_errors = old_stats->rx_over_errors +
6607                 get_stat64(&hw_stats->rxbds_empty);
6608         stats->rx_frame_errors = old_stats->rx_frame_errors +
6609                 get_stat64(&hw_stats->rx_align_errors);
6610         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6611                 get_stat64(&hw_stats->tx_discards);
6612         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6613                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6614
6615         stats->rx_crc_errors = old_stats->rx_crc_errors +
6616                 calc_crc_errors(tp);
6617
6618         return stats;
6619 }
6620
6621 static inline u32 calc_crc(unsigned char *buf, int len)
6622 {
6623         u32 reg;
6624         u32 tmp;
6625         int j, k;
6626
6627         reg = 0xffffffff;
6628
6629         for (j = 0; j < len; j++) {
6630                 reg ^= buf[j];
6631
6632                 for (k = 0; k < 8; k++) {
6633                         tmp = reg & 0x01;
6634
6635                         reg >>= 1;
6636
6637                         if (tmp) {
6638                                 reg ^= 0xedb88320;
6639                         }
6640                 }
6641         }
6642
6643         return ~reg;
6644 }
6645
6646 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6647 {
6648         /* accept or reject all multicast frames */
6649         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6650         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6651         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6652         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6653 }
6654
6655 static void __tg3_set_rx_mode(struct net_device *dev)
6656 {
6657         struct tg3 *tp = netdev_priv(dev);
6658         u32 rx_mode;
6659
6660         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6661                                   RX_MODE_KEEP_VLAN_TAG);
6662
6663         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6664          * flag clear.
6665          */
6666 #if TG3_VLAN_TAG_USED
6667         if (!tp->vlgrp &&
6668             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6669                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6670 #else
6671         /* By definition, VLAN is disabled always in this
6672          * case.
6673          */
6674         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6675                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6676 #endif
6677
6678         if (dev->flags & IFF_PROMISC) {
6679                 /* Promiscuous mode. */
6680                 rx_mode |= RX_MODE_PROMISC;
6681         } else if (dev->flags & IFF_ALLMULTI) {
6682                 /* Accept all multicast. */
6683                 tg3_set_multi (tp, 1);
6684         } else if (dev->mc_count < 1) {
6685                 /* Reject all multicast. */
6686                 tg3_set_multi (tp, 0);
6687         } else {
6688                 /* Accept one or more multicast(s). */
6689                 struct dev_mc_list *mclist;
6690                 unsigned int i;
6691                 u32 mc_filter[4] = { 0, };
6692                 u32 regidx;
6693                 u32 bit;
6694                 u32 crc;
6695
6696                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6697                      i++, mclist = mclist->next) {
6698
6699                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6700                         bit = ~crc & 0x7f;
6701                         regidx = (bit & 0x60) >> 5;
6702                         bit &= 0x1f;
6703                         mc_filter[regidx] |= (1 << bit);
6704                 }
6705
6706                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6707                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6708                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6709                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6710         }
6711
6712         if (rx_mode != tp->rx_mode) {
6713                 tp->rx_mode = rx_mode;
6714                 tw32_f(MAC_RX_MODE, rx_mode);
6715                 udelay(10);
6716         }
6717 }
6718
6719 static void tg3_set_rx_mode(struct net_device *dev)
6720 {
6721         struct tg3 *tp = netdev_priv(dev);
6722
6723         spin_lock_irq(&tp->lock);
6724         spin_lock(&tp->tx_lock);
6725         __tg3_set_rx_mode(dev);
6726         spin_unlock(&tp->tx_lock);
6727         spin_unlock_irq(&tp->lock);
6728 }
6729
6730 #define TG3_REGDUMP_LEN         (32 * 1024)
6731
6732 static int tg3_get_regs_len(struct net_device *dev)
6733 {
6734         return TG3_REGDUMP_LEN;
6735 }
6736
6737 static void tg3_get_regs(struct net_device *dev,
6738                 struct ethtool_regs *regs, void *_p)
6739 {
6740         u32 *p = _p;
6741         struct tg3 *tp = netdev_priv(dev);
6742         u8 *orig_p = _p;
6743         int i;
6744
6745         regs->version = 0;
6746
6747         memset(p, 0, TG3_REGDUMP_LEN);
6748
6749         spin_lock_irq(&tp->lock);
6750         spin_lock(&tp->tx_lock);
6751
6752 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6753 #define GET_REG32_LOOP(base,len)                \
6754 do {    p = (u32 *)(orig_p + (base));           \
6755         for (i = 0; i < len; i += 4)            \
6756                 __GET_REG32((base) + i);        \
6757 } while (0)
6758 #define GET_REG32_1(reg)                        \
6759 do {    p = (u32 *)(orig_p + (reg));            \
6760         __GET_REG32((reg));                     \
6761 } while (0)
6762
6763         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6764         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6765         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6766         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6767         GET_REG32_1(SNDDATAC_MODE);
6768         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6769         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6770         GET_REG32_1(SNDBDC_MODE);
6771         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6772         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6773         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6774         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6775         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6776         GET_REG32_1(RCVDCC_MODE);
6777         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6778         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6779         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6780         GET_REG32_1(MBFREE_MODE);
6781         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6782         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6783         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6784         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6785         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6786         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6787         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6788         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6789         GET_REG32_LOOP(FTQ_RESET, 0x120);
6790         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6791         GET_REG32_1(DMAC_MODE);
6792         GET_REG32_LOOP(GRC_MODE, 0x4c);
6793         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6794                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6795
6796 #undef __GET_REG32
6797 #undef GET_REG32_LOOP
6798 #undef GET_REG32_1
6799
6800         spin_unlock(&tp->tx_lock);
6801         spin_unlock_irq(&tp->lock);
6802 }
6803
6804 static int tg3_get_eeprom_len(struct net_device *dev)
6805 {
6806         struct tg3 *tp = netdev_priv(dev);
6807
6808         return tp->nvram_size;
6809 }
6810
6811 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6812
6813 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6814 {
6815         struct tg3 *tp = netdev_priv(dev);
6816         int ret;
6817         u8  *pd;
6818         u32 i, offset, len, val, b_offset, b_count;
6819
6820         offset = eeprom->offset;
6821         len = eeprom->len;
6822         eeprom->len = 0;
6823
6824         eeprom->magic = TG3_EEPROM_MAGIC;
6825
6826         if (offset & 3) {
6827                 /* adjustments to start on required 4 byte boundary */
6828                 b_offset = offset & 3;
6829                 b_count = 4 - b_offset;
6830                 if (b_count > len) {
6831                         /* i.e. offset=1 len=2 */
6832                         b_count = len;
6833                 }
6834                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6835                 if (ret)
6836                         return ret;
6837                 val = cpu_to_le32(val);
6838                 memcpy(data, ((char*)&val) + b_offset, b_count);
6839                 len -= b_count;
6840                 offset += b_count;
6841                 eeprom->len += b_count;
6842         }
6843
6844         /* read bytes upto the last 4 byte boundary */
6845         pd = &data[eeprom->len];
6846         for (i = 0; i < (len - (len & 3)); i += 4) {
6847                 ret = tg3_nvram_read(tp, offset + i, &val);
6848                 if (ret) {
6849                         eeprom->len += i;
6850                         return ret;
6851                 }
6852                 val = cpu_to_le32(val);
6853                 memcpy(pd + i, &val, 4);
6854         }
6855         eeprom->len += i;
6856
6857         if (len & 3) {
6858                 /* read last bytes not ending on 4 byte boundary */
6859                 pd = &data[eeprom->len];
6860                 b_count = len & 3;
6861                 b_offset = offset + len - b_count;
6862                 ret = tg3_nvram_read(tp, b_offset, &val);
6863                 if (ret)
6864                         return ret;
6865                 val = cpu_to_le32(val);
6866                 memcpy(pd, ((char*)&val), b_count);
6867                 eeprom->len += b_count;
6868         }
6869         return 0;
6870 }
6871
6872 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6873
6874 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6875 {
6876         struct tg3 *tp = netdev_priv(dev);
6877         int ret;
6878         u32 offset, len, b_offset, odd_len, start, end;
6879         u8 *buf;
6880
6881         if (eeprom->magic != TG3_EEPROM_MAGIC)
6882                 return -EINVAL;
6883
6884         offset = eeprom->offset;
6885         len = eeprom->len;
6886
6887         if ((b_offset = (offset & 3))) {
6888                 /* adjustments to start on required 4 byte boundary */
6889                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6890                 if (ret)
6891                         return ret;
6892                 start = cpu_to_le32(start);
6893                 len += b_offset;
6894                 offset &= ~3;
6895                 if (len < 4)
6896                         len = 4;
6897         }
6898
6899         odd_len = 0;
6900         if (len & 3) {
6901                 /* adjustments to end on required 4 byte boundary */
6902                 odd_len = 1;
6903                 len = (len + 3) & ~3;
6904                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6905                 if (ret)
6906                         return ret;
6907                 end = cpu_to_le32(end);
6908         }
6909
6910         buf = data;
6911         if (b_offset || odd_len) {
6912                 buf = kmalloc(len, GFP_KERNEL);
6913                 if (buf == 0)
6914                         return -ENOMEM;
6915                 if (b_offset)
6916                         memcpy(buf, &start, 4);
6917                 if (odd_len)
6918                         memcpy(buf+len-4, &end, 4);
6919                 memcpy(buf + b_offset, data, eeprom->len);
6920         }
6921
6922         ret = tg3_nvram_write_block(tp, offset, len, buf);
6923
6924         if (buf != data)
6925                 kfree(buf);
6926
6927         return ret;
6928 }
6929
6930 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6931 {
6932         struct tg3 *tp = netdev_priv(dev);
6933   
6934         cmd->supported = (SUPPORTED_Autoneg);
6935
6936         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6937                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6938                                    SUPPORTED_1000baseT_Full);
6939
6940         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6941                 cmd->supported |= (SUPPORTED_100baseT_Half |
6942                                   SUPPORTED_100baseT_Full |
6943                                   SUPPORTED_10baseT_Half |
6944                                   SUPPORTED_10baseT_Full |
6945                                   SUPPORTED_MII);
6946         else
6947                 cmd->supported |= SUPPORTED_FIBRE;
6948   
6949         cmd->advertising = tp->link_config.advertising;
6950         if (netif_running(dev)) {
6951                 cmd->speed = tp->link_config.active_speed;
6952                 cmd->duplex = tp->link_config.active_duplex;
6953         }
6954         cmd->port = 0;
6955         cmd->phy_address = PHY_ADDR;
6956         cmd->transceiver = 0;
6957         cmd->autoneg = tp->link_config.autoneg;
6958         cmd->maxtxpkt = 0;
6959         cmd->maxrxpkt = 0;
6960         return 0;
6961 }
6962   
6963 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6964 {
6965         struct tg3 *tp = netdev_priv(dev);
6966   
6967         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6968                 /* These are the only valid advertisement bits allowed.  */
6969                 if (cmd->autoneg == AUTONEG_ENABLE &&
6970                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6971                                           ADVERTISED_1000baseT_Full |
6972                                           ADVERTISED_Autoneg |
6973                                           ADVERTISED_FIBRE)))
6974                         return -EINVAL;
6975         }
6976
6977         spin_lock_irq(&tp->lock);
6978         spin_lock(&tp->tx_lock);
6979
6980         tp->link_config.autoneg = cmd->autoneg;
6981         if (cmd->autoneg == AUTONEG_ENABLE) {
6982                 tp->link_config.advertising = cmd->advertising;
6983                 tp->link_config.speed = SPEED_INVALID;
6984                 tp->link_config.duplex = DUPLEX_INVALID;
6985         } else {
6986                 tp->link_config.advertising = 0;
6987                 tp->link_config.speed = cmd->speed;
6988                 tp->link_config.duplex = cmd->duplex;
6989         }
6990   
6991         if (netif_running(dev))
6992                 tg3_setup_phy(tp, 1);
6993
6994         spin_unlock(&tp->tx_lock);
6995         spin_unlock_irq(&tp->lock);
6996   
6997         return 0;
6998 }
6999   
7000 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7001 {
7002         struct tg3 *tp = netdev_priv(dev);
7003   
7004         strcpy(info->driver, DRV_MODULE_NAME);
7005         strcpy(info->version, DRV_MODULE_VERSION);
7006         strcpy(info->bus_info, pci_name(tp->pdev));
7007 }
7008   
7009 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7010 {
7011         struct tg3 *tp = netdev_priv(dev);
7012   
7013         wol->supported = WAKE_MAGIC;
7014         wol->wolopts = 0;
7015         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7016                 wol->wolopts = WAKE_MAGIC;
7017         memset(&wol->sopass, 0, sizeof(wol->sopass));
7018 }
7019   
7020 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7021 {
7022         struct tg3 *tp = netdev_priv(dev);
7023   
7024         if (wol->wolopts & ~WAKE_MAGIC)
7025                 return -EINVAL;
7026         if ((wol->wolopts & WAKE_MAGIC) &&
7027             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7028             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7029                 return -EINVAL;
7030   
7031         spin_lock_irq(&tp->lock);
7032         if (wol->wolopts & WAKE_MAGIC)
7033                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7034         else
7035                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7036         spin_unlock_irq(&tp->lock);
7037   
7038         return 0;
7039 }
7040   
7041 static u32 tg3_get_msglevel(struct net_device *dev)
7042 {
7043         struct tg3 *tp = netdev_priv(dev);
7044         return tp->msg_enable;
7045 }
7046   
7047 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7048 {
7049         struct tg3 *tp = netdev_priv(dev);
7050         tp->msg_enable = value;
7051 }
7052   
7053 #if TG3_TSO_SUPPORT != 0
7054 static int tg3_set_tso(struct net_device *dev, u32 value)
7055 {
7056         struct tg3 *tp = netdev_priv(dev);
7057
7058         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7059                 if (value)
7060                         return -EINVAL;
7061                 return 0;
7062         }
7063         return ethtool_op_set_tso(dev, value);
7064 }
7065 #endif
7066   
7067 static int tg3_nway_reset(struct net_device *dev)
7068 {
7069         struct tg3 *tp = netdev_priv(dev);
7070         u32 bmcr;
7071         int r;
7072   
7073         if (!netif_running(dev))
7074                 return -EAGAIN;
7075
7076         spin_lock_irq(&tp->lock);
7077         r = -EINVAL;
7078         tg3_readphy(tp, MII_BMCR, &bmcr);
7079         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7080             (bmcr & BMCR_ANENABLE)) {
7081                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7082                 r = 0;
7083         }
7084         spin_unlock_irq(&tp->lock);
7085   
7086         return r;
7087 }
7088   
7089 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7090 {
7091         struct tg3 *tp = netdev_priv(dev);
7092   
7093         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7094         ering->rx_mini_max_pending = 0;
7095         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7096
7097         ering->rx_pending = tp->rx_pending;
7098         ering->rx_mini_pending = 0;
7099         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7100         ering->tx_pending = tp->tx_pending;
7101 }
7102   
7103 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7104 {
7105         struct tg3 *tp = netdev_priv(dev);
7106   
7107         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7108             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7109             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7110                 return -EINVAL;
7111   
7112         if (netif_running(dev))
7113                 tg3_netif_stop(tp);
7114
7115         spin_lock_irq(&tp->lock);
7116         spin_lock(&tp->tx_lock);
7117   
7118         tp->rx_pending = ering->rx_pending;
7119
7120         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7121             tp->rx_pending > 63)
7122                 tp->rx_pending = 63;
7123         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7124         tp->tx_pending = ering->tx_pending;
7125
7126         if (netif_running(dev)) {
7127                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7128                 tg3_init_hw(tp);
7129                 tg3_netif_start(tp);
7130         }
7131
7132         spin_unlock(&tp->tx_lock);
7133         spin_unlock_irq(&tp->lock);
7134   
7135         return 0;
7136 }
7137   
7138 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7139 {
7140         struct tg3 *tp = netdev_priv(dev);
7141   
7142         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7143         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7144         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7145 }
7146   
7147 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7148 {
7149         struct tg3 *tp = netdev_priv(dev);
7150   
7151         if (netif_running(dev))
7152                 tg3_netif_stop(tp);
7153
7154         spin_lock_irq(&tp->lock);
7155         spin_lock(&tp->tx_lock);
7156         if (epause->autoneg)
7157                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7158         else
7159                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7160         if (epause->rx_pause)
7161                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7162         else
7163                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7164         if (epause->tx_pause)
7165                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7166         else
7167                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7168
7169         if (netif_running(dev)) {
7170                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7171                 tg3_init_hw(tp);
7172                 tg3_netif_start(tp);
7173         }
7174         spin_unlock(&tp->tx_lock);
7175         spin_unlock_irq(&tp->lock);
7176   
7177         return 0;
7178 }
7179   
7180 static u32 tg3_get_rx_csum(struct net_device *dev)
7181 {
7182         struct tg3 *tp = netdev_priv(dev);
7183         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7184 }
7185   
7186 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7187 {
7188         struct tg3 *tp = netdev_priv(dev);
7189   
7190         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7191                 if (data != 0)
7192                         return -EINVAL;
7193                 return 0;
7194         }
7195   
7196         spin_lock_irq(&tp->lock);
7197         if (data)
7198                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7199         else
7200                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7201         spin_unlock_irq(&tp->lock);
7202   
7203         return 0;
7204 }
7205   
7206 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7207 {
7208         struct tg3 *tp = netdev_priv(dev);
7209   
7210         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7211                 if (data != 0)
7212                         return -EINVAL;
7213                 return 0;
7214         }
7215   
7216         if (data)
7217                 dev->features |= NETIF_F_IP_CSUM;
7218         else
7219                 dev->features &= ~NETIF_F_IP_CSUM;
7220
7221         return 0;
7222 }
7223
7224 static int tg3_get_stats_count (struct net_device *dev)
7225 {
7226         return TG3_NUM_STATS;
7227 }
7228
7229 static int tg3_get_test_count (struct net_device *dev)
7230 {
7231         return TG3_NUM_TEST;
7232 }
7233
7234 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7235 {
7236         switch (stringset) {
7237         case ETH_SS_STATS:
7238                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7239                 break;
7240         case ETH_SS_TEST:
7241                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7242                 break;
7243         default:
7244                 WARN_ON(1);     /* we need a WARN() */
7245                 break;
7246         }
7247 }
7248
7249 static void tg3_get_ethtool_stats (struct net_device *dev,
7250                                    struct ethtool_stats *estats, u64 *tmp_stats)
7251 {
7252         struct tg3 *tp = netdev_priv(dev);
7253         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7254 }
7255
7256 #define NVRAM_TEST_SIZE 0x100
7257
7258 static int tg3_test_nvram(struct tg3 *tp)
7259 {
7260         u32 *buf, csum;
7261         int i, j, err = 0;
7262
7263         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7264         if (buf == NULL)
7265                 return -ENOMEM;
7266
7267         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7268                 u32 val;
7269
7270                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7271                         break;
7272                 buf[j] = cpu_to_le32(val);
7273         }
7274         if (i < NVRAM_TEST_SIZE)
7275                 goto out;
7276
7277         err = -EIO;
7278         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7279                 goto out;
7280
7281         /* Bootstrap checksum at offset 0x10 */
7282         csum = calc_crc((unsigned char *) buf, 0x10);
7283         if(csum != cpu_to_le32(buf[0x10/4]))
7284                 goto out;
7285
7286         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7287         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7288         if (csum != cpu_to_le32(buf[0xfc/4]))
7289                  goto out;
7290
7291         err = 0;
7292
7293 out:
7294         kfree(buf);
7295         return err;
7296 }
7297
7298 #define TG3_SERDES_TIMEOUT_SEC  2
7299 #define TG3_COPPER_TIMEOUT_SEC  6
7300
7301 static int tg3_test_link(struct tg3 *tp)
7302 {
7303         int i, max;
7304
7305         if (!netif_running(tp->dev))
7306                 return -ENODEV;
7307
7308         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7309                 max = TG3_SERDES_TIMEOUT_SEC;
7310         else
7311                 max = TG3_COPPER_TIMEOUT_SEC;
7312
7313         for (i = 0; i < max; i++) {
7314                 if (netif_carrier_ok(tp->dev))
7315                         return 0;
7316
7317                 if (msleep_interruptible(1000))
7318                         break;
7319         }
7320
7321         return -EIO;
7322 }
7323
7324 /* Only test the commonly used registers */
7325 static int tg3_test_registers(struct tg3 *tp)
7326 {
7327         int i, is_5705;
7328         u32 offset, read_mask, write_mask, val, save_val, read_val;
7329         static struct {
7330                 u16 offset;
7331                 u16 flags;
7332 #define TG3_FL_5705     0x1
7333 #define TG3_FL_NOT_5705 0x2
7334 #define TG3_FL_NOT_5788 0x4
7335                 u32 read_mask;
7336                 u32 write_mask;
7337         } reg_tbl[] = {
7338                 /* MAC Control Registers */
7339                 { MAC_MODE, TG3_FL_NOT_5705,
7340                         0x00000000, 0x00ef6f8c },
7341                 { MAC_MODE, TG3_FL_5705,
7342                         0x00000000, 0x01ef6b8c },
7343                 { MAC_STATUS, TG3_FL_NOT_5705,
7344                         0x03800107, 0x00000000 },
7345                 { MAC_STATUS, TG3_FL_5705,
7346                         0x03800100, 0x00000000 },
7347                 { MAC_ADDR_0_HIGH, 0x0000,
7348                         0x00000000, 0x0000ffff },
7349                 { MAC_ADDR_0_LOW, 0x0000,
7350                         0x00000000, 0xffffffff },
7351                 { MAC_RX_MTU_SIZE, 0x0000,
7352                         0x00000000, 0x0000ffff },
7353                 { MAC_TX_MODE, 0x0000,
7354                         0x00000000, 0x00000070 },
7355                 { MAC_TX_LENGTHS, 0x0000,
7356                         0x00000000, 0x00003fff },
7357                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7358                         0x00000000, 0x000007fc },
7359                 { MAC_RX_MODE, TG3_FL_5705,
7360                         0x00000000, 0x000007dc },
7361                 { MAC_HASH_REG_0, 0x0000,
7362                         0x00000000, 0xffffffff },
7363                 { MAC_HASH_REG_1, 0x0000,
7364                         0x00000000, 0xffffffff },
7365                 { MAC_HASH_REG_2, 0x0000,
7366                         0x00000000, 0xffffffff },
7367                 { MAC_HASH_REG_3, 0x0000,
7368                         0x00000000, 0xffffffff },
7369
7370                 /* Receive Data and Receive BD Initiator Control Registers. */
7371                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7372                         0x00000000, 0xffffffff },
7373                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7374                         0x00000000, 0xffffffff },
7375                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7376                         0x00000000, 0x00000003 },
7377                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7378                         0x00000000, 0xffffffff },
7379                 { RCVDBDI_STD_BD+0, 0x0000,
7380                         0x00000000, 0xffffffff },
7381                 { RCVDBDI_STD_BD+4, 0x0000,
7382                         0x00000000, 0xffffffff },
7383                 { RCVDBDI_STD_BD+8, 0x0000,
7384                         0x00000000, 0xffff0002 },
7385                 { RCVDBDI_STD_BD+0xc, 0x0000,
7386                         0x00000000, 0xffffffff },
7387         
7388                 /* Receive BD Initiator Control Registers. */
7389                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7390                         0x00000000, 0xffffffff },
7391                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7392                         0x00000000, 0x000003ff },
7393                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7394                         0x00000000, 0xffffffff },
7395         
7396                 /* Host Coalescing Control Registers. */
7397                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7398                         0x00000000, 0x00000004 },
7399                 { HOSTCC_MODE, TG3_FL_5705,
7400                         0x00000000, 0x000000f6 },
7401                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7402                         0x00000000, 0xffffffff },
7403                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7404                         0x00000000, 0x000003ff },
7405                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7406                         0x00000000, 0xffffffff },
7407                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7408                         0x00000000, 0x000003ff },
7409                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7410                         0x00000000, 0xffffffff },
7411                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7412                         0x00000000, 0x000000ff },
7413                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7414                         0x00000000, 0xffffffff },
7415                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7416                         0x00000000, 0x000000ff },
7417                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7418                         0x00000000, 0xffffffff },
7419                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7420                         0x00000000, 0xffffffff },
7421                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7422                         0x00000000, 0xffffffff },
7423                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7424                         0x00000000, 0x000000ff },
7425                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7426                         0x00000000, 0xffffffff },
7427                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7428                         0x00000000, 0x000000ff },
7429                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7430                         0x00000000, 0xffffffff },
7431                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7432                         0x00000000, 0xffffffff },
7433                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7434                         0x00000000, 0xffffffff },
7435                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7436                         0x00000000, 0xffffffff },
7437                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7438                         0x00000000, 0xffffffff },
7439                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7440                         0xffffffff, 0x00000000 },
7441                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7442                         0xffffffff, 0x00000000 },
7443
7444                 /* Buffer Manager Control Registers. */
7445                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7446                         0x00000000, 0x007fff80 },
7447                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7448                         0x00000000, 0x007fffff },
7449                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7450                         0x00000000, 0x0000003f },
7451                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7452                         0x00000000, 0x000001ff },
7453                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7454                         0x00000000, 0x000001ff },
7455                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7456                         0xffffffff, 0x00000000 },
7457                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7458                         0xffffffff, 0x00000000 },
7459         
7460                 /* Mailbox Registers */
7461                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7462                         0x00000000, 0x000001ff },
7463                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7464                         0x00000000, 0x000001ff },
7465                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7466                         0x00000000, 0x000007ff },
7467                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7468                         0x00000000, 0x000001ff },
7469
7470                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7471         };
7472
7473         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7474                 is_5705 = 1;
7475         else
7476                 is_5705 = 0;
7477
7478         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7479                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7480                         continue;
7481
7482                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7483                         continue;
7484
7485                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7486                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7487                         continue;
7488
7489                 offset = (u32) reg_tbl[i].offset;
7490                 read_mask = reg_tbl[i].read_mask;
7491                 write_mask = reg_tbl[i].write_mask;
7492
7493                 /* Save the original register content */
7494                 save_val = tr32(offset);
7495
7496                 /* Determine the read-only value. */
7497                 read_val = save_val & read_mask;
7498
7499                 /* Write zero to the register, then make sure the read-only bits
7500                  * are not changed and the read/write bits are all zeros.
7501                  */
7502                 tw32(offset, 0);
7503
7504                 val = tr32(offset);
7505
7506                 /* Test the read-only and read/write bits. */
7507                 if (((val & read_mask) != read_val) || (val & write_mask))
7508                         goto out;
7509
7510                 /* Write ones to all the bits defined by RdMask and WrMask, then
7511                  * make sure the read-only bits are not changed and the
7512                  * read/write bits are all ones.
7513                  */
7514                 tw32(offset, read_mask | write_mask);
7515
7516                 val = tr32(offset);
7517
7518                 /* Test the read-only bits. */
7519                 if ((val & read_mask) != read_val)
7520                         goto out;
7521
7522                 /* Test the read/write bits. */
7523                 if ((val & write_mask) != write_mask)
7524                         goto out;
7525
7526                 tw32(offset, save_val);
7527         }
7528
7529         return 0;
7530
7531 out:
7532         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7533         tw32(offset, save_val);
7534         return -EIO;
7535 }
7536
7537 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7538 {
7539         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7540         int i;
7541         u32 j;
7542
7543         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7544                 for (j = 0; j < len; j += 4) {
7545                         u32 val;
7546
7547                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7548                         tg3_read_mem(tp, offset + j, &val);
7549                         if (val != test_pattern[i])
7550                                 return -EIO;
7551                 }
7552         }
7553         return 0;
7554 }
7555
7556 static int tg3_test_memory(struct tg3 *tp)
7557 {
7558         static struct mem_entry {
7559                 u32 offset;
7560                 u32 len;
7561         } mem_tbl_570x[] = {
7562                 { 0x00000000, 0x01000},
7563                 { 0x00002000, 0x1c000},
7564                 { 0xffffffff, 0x00000}
7565         }, mem_tbl_5705[] = {
7566                 { 0x00000100, 0x0000c},
7567                 { 0x00000200, 0x00008},
7568                 { 0x00000b50, 0x00400},
7569                 { 0x00004000, 0x00800},
7570                 { 0x00006000, 0x01000},
7571                 { 0x00008000, 0x02000},
7572                 { 0x00010000, 0x0e000},
7573                 { 0xffffffff, 0x00000}
7574         };
7575         struct mem_entry *mem_tbl;
7576         int err = 0;
7577         int i;
7578
7579         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7580                 mem_tbl = mem_tbl_5705;
7581         else
7582                 mem_tbl = mem_tbl_570x;
7583
7584         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7585                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7586                     mem_tbl[i].len)) != 0)
7587                         break;
7588         }
7589         
7590         return err;
7591 }
7592
7593 static int tg3_test_loopback(struct tg3 *tp)
7594 {
7595         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7596         u32 desc_idx;
7597         struct sk_buff *skb, *rx_skb;
7598         u8 *tx_data;
7599         dma_addr_t map;
7600         int num_pkts, tx_len, rx_len, i, err;
7601         struct tg3_rx_buffer_desc *desc;
7602
7603         if (!netif_running(tp->dev))
7604                 return -ENODEV;
7605
7606         err = -EIO;
7607
7608         tg3_abort_hw(tp, 1);
7609
7610         /* Clearing this flag to keep interrupts disabled */
7611         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7612         tg3_reset_hw(tp);
7613
7614         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7615                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7616                    MAC_MODE_PORT_MODE_GMII;
7617         tw32(MAC_MODE, mac_mode);
7618
7619         tx_len = 1514;
7620         skb = dev_alloc_skb(tx_len);
7621         tx_data = skb_put(skb, tx_len);
7622         memcpy(tx_data, tp->dev->dev_addr, 6);
7623         memset(tx_data + 6, 0x0, 8);
7624
7625         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7626
7627         for (i = 14; i < tx_len; i++)
7628                 tx_data[i] = (u8) (i & 0xff);
7629
7630         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7631
7632         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7633              HOSTCC_MODE_NOW);
7634
7635         udelay(10);
7636
7637         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7638
7639         send_idx = 0;
7640         num_pkts = 0;
7641
7642         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7643
7644         send_idx++;
7645         num_pkts++;
7646
7647         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7648         tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7649
7650         udelay(10);
7651
7652         for (i = 0; i < 10; i++) {
7653                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7654                        HOSTCC_MODE_NOW);
7655
7656                 udelay(10);
7657
7658                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7659                 rx_idx = tp->hw_status->idx[0].rx_producer;
7660                 if ((tx_idx == send_idx) &&
7661                     (rx_idx == (rx_start_idx + num_pkts)))
7662                         break;
7663         }
7664
7665         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7666         dev_kfree_skb(skb);
7667
7668         if (tx_idx != send_idx)
7669                 goto out;
7670
7671         if (rx_idx != rx_start_idx + num_pkts)
7672                 goto out;
7673
7674         desc = &tp->rx_rcb[rx_start_idx];
7675         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7676         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7677         if (opaque_key != RXD_OPAQUE_RING_STD)
7678                 goto out;
7679
7680         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7681             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7682                 goto out;
7683
7684         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7685         if (rx_len != tx_len)
7686                 goto out;
7687
7688         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7689
7690         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7691         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7692
7693         for (i = 14; i < tx_len; i++) {
7694                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7695                         goto out;
7696         }
7697         err = 0;
7698         
7699         /* tg3_free_rings will unmap and free the rx_skb */
7700 out:
7701         return err;
7702 }
7703
7704 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7705                           u64 *data)
7706 {
7707         struct tg3 *tp = netdev_priv(dev);
7708
7709         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7710
7711         if (tg3_test_nvram(tp) != 0) {
7712                 etest->flags |= ETH_TEST_FL_FAILED;
7713                 data[0] = 1;
7714         }
7715         if (tg3_test_link(tp) != 0) {
7716                 etest->flags |= ETH_TEST_FL_FAILED;
7717                 data[1] = 1;
7718         }
7719         if (etest->flags & ETH_TEST_FL_OFFLINE) {
7720                 if (netif_running(dev))
7721                         tg3_netif_stop(tp);
7722
7723                 spin_lock_irq(&tp->lock);
7724                 spin_lock(&tp->tx_lock);
7725
7726                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7727                 tg3_nvram_lock(tp);
7728                 tg3_halt_cpu(tp, RX_CPU_BASE);
7729                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7730                         tg3_halt_cpu(tp, TX_CPU_BASE);
7731                 tg3_nvram_unlock(tp);
7732
7733                 if (tg3_test_registers(tp) != 0) {
7734                         etest->flags |= ETH_TEST_FL_FAILED;
7735                         data[2] = 1;
7736                 }
7737                 if (tg3_test_memory(tp) != 0) {
7738                         etest->flags |= ETH_TEST_FL_FAILED;
7739                         data[3] = 1;
7740                 }
7741                 if (tg3_test_loopback(tp) != 0) {
7742                         etest->flags |= ETH_TEST_FL_FAILED;
7743                         data[4] = 1;
7744                 }
7745
7746                 spin_unlock(&tp->tx_lock);
7747                 spin_unlock_irq(&tp->lock);
7748                 if (tg3_test_interrupt(tp) != 0) {
7749                         etest->flags |= ETH_TEST_FL_FAILED;
7750                         data[5] = 1;
7751                 }
7752                 spin_lock_irq(&tp->lock);
7753                 spin_lock(&tp->tx_lock);
7754
7755                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7756                 if (netif_running(dev)) {
7757                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7758                         tg3_init_hw(tp);
7759                         tg3_netif_start(tp);
7760                 }
7761                 spin_unlock(&tp->tx_lock);
7762                 spin_unlock_irq(&tp->lock);
7763         }
7764 }
7765
7766 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7767 {
7768         struct mii_ioctl_data *data = if_mii(ifr);
7769         struct tg3 *tp = netdev_priv(dev);
7770         int err;
7771
7772         switch(cmd) {
7773         case SIOCGMIIPHY:
7774                 data->phy_id = PHY_ADDR;
7775
7776                 /* fallthru */
7777         case SIOCGMIIREG: {
7778                 u32 mii_regval;
7779
7780                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7781                         break;                  /* We have no PHY */
7782
7783                 spin_lock_irq(&tp->lock);
7784                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7785                 spin_unlock_irq(&tp->lock);
7786
7787                 data->val_out = mii_regval;
7788
7789                 return err;
7790         }
7791
7792         case SIOCSMIIREG:
7793                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7794                         break;                  /* We have no PHY */
7795
7796                 if (!capable(CAP_NET_ADMIN))
7797                         return -EPERM;
7798
7799                 spin_lock_irq(&tp->lock);
7800                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7801                 spin_unlock_irq(&tp->lock);
7802
7803                 return err;
7804
7805         default:
7806                 /* do nothing */
7807                 break;
7808         }
7809         return -EOPNOTSUPP;
7810 }
7811
7812 #if TG3_VLAN_TAG_USED
7813 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7814 {
7815         struct tg3 *tp = netdev_priv(dev);
7816
7817         spin_lock_irq(&tp->lock);
7818         spin_lock(&tp->tx_lock);
7819
7820         tp->vlgrp = grp;
7821
7822         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7823         __tg3_set_rx_mode(dev);
7824
7825         spin_unlock(&tp->tx_lock);
7826         spin_unlock_irq(&tp->lock);
7827 }
7828
7829 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7830 {
7831         struct tg3 *tp = netdev_priv(dev);
7832
7833         spin_lock_irq(&tp->lock);
7834         spin_lock(&tp->tx_lock);
7835         if (tp->vlgrp)
7836                 tp->vlgrp->vlan_devices[vid] = NULL;
7837         spin_unlock(&tp->tx_lock);
7838         spin_unlock_irq(&tp->lock);
7839 }
7840 #endif
7841
7842 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7843 {
7844         struct tg3 *tp = netdev_priv(dev);
7845
7846         memcpy(ec, &tp->coal, sizeof(*ec));
7847         return 0;
7848 }
7849
7850 static struct ethtool_ops tg3_ethtool_ops = {
7851         .get_settings           = tg3_get_settings,
7852         .set_settings           = tg3_set_settings,
7853         .get_drvinfo            = tg3_get_drvinfo,
7854         .get_regs_len           = tg3_get_regs_len,
7855         .get_regs               = tg3_get_regs,
7856         .get_wol                = tg3_get_wol,
7857         .set_wol                = tg3_set_wol,
7858         .get_msglevel           = tg3_get_msglevel,
7859         .set_msglevel           = tg3_set_msglevel,
7860         .nway_reset             = tg3_nway_reset,
7861         .get_link               = ethtool_op_get_link,
7862         .get_eeprom_len         = tg3_get_eeprom_len,
7863         .get_eeprom             = tg3_get_eeprom,
7864         .set_eeprom             = tg3_set_eeprom,
7865         .get_ringparam          = tg3_get_ringparam,
7866         .set_ringparam          = tg3_set_ringparam,
7867         .get_pauseparam         = tg3_get_pauseparam,
7868         .set_pauseparam         = tg3_set_pauseparam,
7869         .get_rx_csum            = tg3_get_rx_csum,
7870         .set_rx_csum            = tg3_set_rx_csum,
7871         .get_tx_csum            = ethtool_op_get_tx_csum,
7872         .set_tx_csum            = tg3_set_tx_csum,
7873         .get_sg                 = ethtool_op_get_sg,
7874         .set_sg                 = ethtool_op_set_sg,
7875 #if TG3_TSO_SUPPORT != 0
7876         .get_tso                = ethtool_op_get_tso,
7877         .set_tso                = tg3_set_tso,
7878 #endif
7879         .self_test_count        = tg3_get_test_count,
7880         .self_test              = tg3_self_test,
7881         .get_strings            = tg3_get_strings,
7882         .get_stats_count        = tg3_get_stats_count,
7883         .get_ethtool_stats      = tg3_get_ethtool_stats,
7884         .get_coalesce           = tg3_get_coalesce,
7885 };
7886
7887 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7888 {
7889         u32 cursize, val;
7890
7891         tp->nvram_size = EEPROM_CHIP_SIZE;
7892
7893         if (tg3_nvram_read(tp, 0, &val) != 0)
7894                 return;
7895
7896         if (swab32(val) != TG3_EEPROM_MAGIC)
7897                 return;
7898
7899         /*
7900          * Size the chip by reading offsets at increasing powers of two.
7901          * When we encounter our validation signature, we know the addressing
7902          * has wrapped around, and thus have our chip size.
7903          */
7904         cursize = 0x800;
7905
7906         while (cursize < tp->nvram_size) {
7907                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7908                         return;
7909
7910                 if (swab32(val) == TG3_EEPROM_MAGIC)
7911                         break;
7912
7913                 cursize <<= 1;
7914         }
7915
7916         tp->nvram_size = cursize;
7917 }
7918                 
7919 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7920 {
7921         u32 val;
7922
7923         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7924                 if (val != 0) {
7925                         tp->nvram_size = (val >> 16) * 1024;
7926                         return;
7927                 }
7928         }
7929         tp->nvram_size = 0x20000;
7930 }
7931
7932 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7933 {
7934         u32 nvcfg1;
7935
7936         nvcfg1 = tr32(NVRAM_CFG1);
7937         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7938                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7939         }
7940         else {
7941                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7942                 tw32(NVRAM_CFG1, nvcfg1);
7943         }
7944
7945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7946                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7947                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7948                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7949                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7950                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7951                                 break;
7952                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7953                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7954                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7955                                 break;
7956                         case FLASH_VENDOR_ATMEL_EEPROM:
7957                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7958                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7959                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7960                                 break;
7961                         case FLASH_VENDOR_ST:
7962                                 tp->nvram_jedecnum = JEDEC_ST;
7963                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7964                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7965                                 break;
7966                         case FLASH_VENDOR_SAIFUN:
7967                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7968                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7969                                 break;
7970                         case FLASH_VENDOR_SST_SMALL:
7971                         case FLASH_VENDOR_SST_LARGE:
7972                                 tp->nvram_jedecnum = JEDEC_SST;
7973                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7974                                 break;
7975                 }
7976         }
7977         else {
7978                 tp->nvram_jedecnum = JEDEC_ATMEL;
7979                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7980                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7981         }
7982 }
7983
7984 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7985 {
7986         u32 nvcfg1;
7987
7988         nvcfg1 = tr32(NVRAM_CFG1);
7989
7990         /* NVRAM protection for TPM */
7991         if (nvcfg1 & (1 << 27))
7992                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7993
7994         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7995                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7996                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7997                         tp->nvram_jedecnum = JEDEC_ATMEL;
7998                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7999                         break;
8000                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8001                         tp->nvram_jedecnum = JEDEC_ATMEL;
8002                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8003                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8004                         break;
8005                 case FLASH_5752VENDOR_ST_M45PE10:
8006                 case FLASH_5752VENDOR_ST_M45PE20:
8007                 case FLASH_5752VENDOR_ST_M45PE40:
8008                         tp->nvram_jedecnum = JEDEC_ST;
8009                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8010                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8011                         break;
8012         }
8013
8014         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8015                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8016                         case FLASH_5752PAGE_SIZE_256:
8017                                 tp->nvram_pagesize = 256;
8018                                 break;
8019                         case FLASH_5752PAGE_SIZE_512:
8020                                 tp->nvram_pagesize = 512;
8021                                 break;
8022                         case FLASH_5752PAGE_SIZE_1K:
8023                                 tp->nvram_pagesize = 1024;
8024                                 break;
8025                         case FLASH_5752PAGE_SIZE_2K:
8026                                 tp->nvram_pagesize = 2048;
8027                                 break;
8028                         case FLASH_5752PAGE_SIZE_4K:
8029                                 tp->nvram_pagesize = 4096;
8030                                 break;
8031                         case FLASH_5752PAGE_SIZE_264:
8032                                 tp->nvram_pagesize = 264;
8033                                 break;
8034                 }
8035         }
8036         else {
8037                 /* For eeprom, set pagesize to maximum eeprom size */
8038                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8039
8040                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8041                 tw32(NVRAM_CFG1, nvcfg1);
8042         }
8043 }
8044
8045 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8046 static void __devinit tg3_nvram_init(struct tg3 *tp)
8047 {
8048         int j;
8049
8050         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8051                 return;
8052
8053         tw32_f(GRC_EEPROM_ADDR,
8054              (EEPROM_ADDR_FSM_RESET |
8055               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8056                EEPROM_ADDR_CLKPERD_SHIFT)));
8057
8058         /* XXX schedule_timeout() ... */
8059         for (j = 0; j < 100; j++)
8060                 udelay(10);
8061
8062         /* Enable seeprom accesses. */
8063         tw32_f(GRC_LOCAL_CTRL,
8064              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8065         udelay(100);
8066
8067         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8068             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8069                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8070
8071                 tg3_enable_nvram_access(tp);
8072
8073                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8074                         tg3_get_5752_nvram_info(tp);
8075                 else
8076                         tg3_get_nvram_info(tp);
8077
8078                 tg3_get_nvram_size(tp);
8079
8080                 tg3_disable_nvram_access(tp);
8081
8082         } else {
8083                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8084
8085                 tg3_get_eeprom_size(tp);
8086         }
8087 }
8088
8089 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8090                                         u32 offset, u32 *val)
8091 {
8092         u32 tmp;
8093         int i;
8094
8095         if (offset > EEPROM_ADDR_ADDR_MASK ||
8096             (offset % 4) != 0)
8097                 return -EINVAL;
8098
8099         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8100                                         EEPROM_ADDR_DEVID_MASK |
8101                                         EEPROM_ADDR_READ);
8102         tw32(GRC_EEPROM_ADDR,
8103              tmp |
8104              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8105              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8106               EEPROM_ADDR_ADDR_MASK) |
8107              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8108
8109         for (i = 0; i < 10000; i++) {
8110                 tmp = tr32(GRC_EEPROM_ADDR);
8111
8112                 if (tmp & EEPROM_ADDR_COMPLETE)
8113                         break;
8114                 udelay(100);
8115         }
8116         if (!(tmp & EEPROM_ADDR_COMPLETE))
8117                 return -EBUSY;
8118
8119         *val = tr32(GRC_EEPROM_DATA);
8120         return 0;
8121 }
8122
8123 #define NVRAM_CMD_TIMEOUT 10000
8124
8125 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8126 {
8127         int i;
8128
8129         tw32(NVRAM_CMD, nvram_cmd);
8130         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8131                 udelay(10);
8132                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8133                         udelay(10);
8134                         break;
8135                 }
8136         }
8137         if (i == NVRAM_CMD_TIMEOUT) {
8138                 return -EBUSY;
8139         }
8140         return 0;
8141 }
8142
8143 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8144 {
8145         int ret;
8146
8147         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8148                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8149                 return -EINVAL;
8150         }
8151
8152         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8153                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8154
8155         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8156                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8157                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8158
8159                 offset = ((offset / tp->nvram_pagesize) <<
8160                           ATMEL_AT45DB0X1B_PAGE_POS) +
8161                         (offset % tp->nvram_pagesize);
8162         }
8163
8164         if (offset > NVRAM_ADDR_MSK)
8165                 return -EINVAL;
8166
8167         tg3_nvram_lock(tp);
8168
8169         tg3_enable_nvram_access(tp);
8170
8171         tw32(NVRAM_ADDR, offset);
8172         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8173                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8174
8175         if (ret == 0)
8176                 *val = swab32(tr32(NVRAM_RDDATA));
8177
8178         tg3_nvram_unlock(tp);
8179
8180         tg3_disable_nvram_access(tp);
8181
8182         return ret;
8183 }
8184
8185 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8186                                     u32 offset, u32 len, u8 *buf)
8187 {
8188         int i, j, rc = 0;
8189         u32 val;
8190
8191         for (i = 0; i < len; i += 4) {
8192                 u32 addr, data;
8193
8194                 addr = offset + i;
8195
8196                 memcpy(&data, buf + i, 4);
8197
8198                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8199
8200                 val = tr32(GRC_EEPROM_ADDR);
8201                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8202
8203                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8204                         EEPROM_ADDR_READ);
8205                 tw32(GRC_EEPROM_ADDR, val |
8206                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8207                         (addr & EEPROM_ADDR_ADDR_MASK) |
8208                         EEPROM_ADDR_START |
8209                         EEPROM_ADDR_WRITE);
8210                 
8211                 for (j = 0; j < 10000; j++) {
8212                         val = tr32(GRC_EEPROM_ADDR);
8213
8214                         if (val & EEPROM_ADDR_COMPLETE)
8215                                 break;
8216                         udelay(100);
8217                 }
8218                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8219                         rc = -EBUSY;
8220                         break;
8221                 }
8222         }
8223
8224         return rc;
8225 }
8226
8227 /* offset and length are dword aligned */
8228 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8229                 u8 *buf)
8230 {
8231         int ret = 0;
8232         u32 pagesize = tp->nvram_pagesize;
8233         u32 pagemask = pagesize - 1;
8234         u32 nvram_cmd;
8235         u8 *tmp;
8236
8237         tmp = kmalloc(pagesize, GFP_KERNEL);
8238         if (tmp == NULL)
8239                 return -ENOMEM;
8240
8241         while (len) {
8242                 int j;
8243                 u32 phy_addr, page_off, size;
8244
8245                 phy_addr = offset & ~pagemask;
8246         
8247                 for (j = 0; j < pagesize; j += 4) {
8248                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8249                                                 (u32 *) (tmp + j))))
8250                                 break;
8251                 }
8252                 if (ret)
8253                         break;
8254
8255                 page_off = offset & pagemask;
8256                 size = pagesize;
8257                 if (len < size)
8258                         size = len;
8259
8260                 len -= size;
8261
8262                 memcpy(tmp + page_off, buf, size);
8263
8264                 offset = offset + (pagesize - page_off);
8265
8266                 tg3_enable_nvram_access(tp);
8267
8268                 /*
8269                  * Before we can erase the flash page, we need
8270                  * to issue a special "write enable" command.
8271                  */
8272                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8273
8274                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8275                         break;
8276
8277                 /* Erase the target page */
8278                 tw32(NVRAM_ADDR, phy_addr);
8279
8280                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8281                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8282
8283                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8284                         break;
8285
8286                 /* Issue another write enable to start the write. */
8287                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8288
8289                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8290                         break;
8291
8292                 for (j = 0; j < pagesize; j += 4) {
8293                         u32 data;
8294
8295                         data = *((u32 *) (tmp + j));
8296                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8297
8298                         tw32(NVRAM_ADDR, phy_addr + j);
8299
8300                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8301                                 NVRAM_CMD_WR;
8302
8303                         if (j == 0)
8304                                 nvram_cmd |= NVRAM_CMD_FIRST;
8305                         else if (j == (pagesize - 4))
8306                                 nvram_cmd |= NVRAM_CMD_LAST;
8307
8308                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8309                                 break;
8310                 }
8311                 if (ret)
8312                         break;
8313         }
8314
8315         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8316         tg3_nvram_exec_cmd(tp, nvram_cmd);
8317
8318         kfree(tmp);
8319
8320         return ret;
8321 }
8322
8323 /* offset and length are dword aligned */
8324 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8325                 u8 *buf)
8326 {
8327         int i, ret = 0;
8328
8329         for (i = 0; i < len; i += 4, offset += 4) {
8330                 u32 data, page_off, phy_addr, nvram_cmd;
8331
8332                 memcpy(&data, buf + i, 4);
8333                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8334
8335                 page_off = offset % tp->nvram_pagesize;
8336
8337                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8338                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8339
8340                         phy_addr = ((offset / tp->nvram_pagesize) <<
8341                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8342                 }
8343                 else {
8344                         phy_addr = offset;
8345                 }
8346
8347                 tw32(NVRAM_ADDR, phy_addr);
8348
8349                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8350
8351                 if ((page_off == 0) || (i == 0))
8352                         nvram_cmd |= NVRAM_CMD_FIRST;
8353                 else if (page_off == (tp->nvram_pagesize - 4))
8354                         nvram_cmd |= NVRAM_CMD_LAST;
8355
8356                 if (i == (len - 4))
8357                         nvram_cmd |= NVRAM_CMD_LAST;
8358
8359                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8360                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8361
8362                         if ((ret = tg3_nvram_exec_cmd(tp,
8363                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8364                                 NVRAM_CMD_DONE)))
8365
8366                                 break;
8367                 }
8368                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8369                         /* We always do complete word writes to eeprom. */
8370                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8371                 }
8372
8373                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8374                         break;
8375         }
8376         return ret;
8377 }
8378
8379 /* offset and length are dword aligned */
8380 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8381 {
8382         int ret;
8383
8384         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8385                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8386                 return -EINVAL;
8387         }
8388
8389         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8390                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8391                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8392                 udelay(40);
8393         }
8394
8395         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8396                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8397         }
8398         else {
8399                 u32 grc_mode;
8400
8401                 tg3_nvram_lock(tp);
8402
8403                 tg3_enable_nvram_access(tp);
8404                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8405                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8406                         tw32(NVRAM_WRITE1, 0x406);
8407
8408                 grc_mode = tr32(GRC_MODE);
8409                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8410
8411                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8412                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8413
8414                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8415                                 buf);
8416                 }
8417                 else {
8418                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8419                                 buf);
8420                 }
8421
8422                 grc_mode = tr32(GRC_MODE);
8423                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8424
8425                 tg3_disable_nvram_access(tp);
8426                 tg3_nvram_unlock(tp);
8427         }
8428
8429         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8430                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8431                 udelay(40);
8432         }
8433
8434         return ret;
8435 }
8436
8437 struct subsys_tbl_ent {
8438         u16 subsys_vendor, subsys_devid;
8439         u32 phy_id;
8440 };
8441
8442 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8443         /* Broadcom boards. */
8444         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8445         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8446         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8447         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8448         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8449         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8450         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8451         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8452         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8453         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8454         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8455
8456         /* 3com boards. */
8457         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8458         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8459         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8460         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8461         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8462
8463         /* DELL boards. */
8464         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8465         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8466         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8467         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8468
8469         /* Compaq boards. */
8470         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8471         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8472         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8473         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8474         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8475
8476         /* IBM boards. */
8477         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8478 };
8479
8480 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8481 {
8482         int i;
8483
8484         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8485                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8486                      tp->pdev->subsystem_vendor) &&
8487                     (subsys_id_to_phy_id[i].subsys_devid ==
8488                      tp->pdev->subsystem_device))
8489                         return &subsys_id_to_phy_id[i];
8490         }
8491         return NULL;
8492 }
8493
8494 /* Since this function may be called in D3-hot power state during
8495  * tg3_init_one(), only config cycles are allowed.
8496  */
8497 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8498 {
8499         u32 val;
8500
8501         /* Make sure register accesses (indirect or otherwise)
8502          * will function correctly.
8503          */
8504         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8505                                tp->misc_host_ctrl);
8506
8507         tp->phy_id = PHY_ID_INVALID;
8508         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8509
8510         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8511         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8512                 u32 nic_cfg, led_cfg;
8513                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8514                 int eeprom_phy_serdes = 0;
8515
8516                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8517                 tp->nic_sram_data_cfg = nic_cfg;
8518
8519                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8520                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8521                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8522                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8523                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8524                     (ver > 0) && (ver < 0x100))
8525                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8526
8527                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8528                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8529                         eeprom_phy_serdes = 1;
8530
8531                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8532                 if (nic_phy_id != 0) {
8533                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8534                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8535
8536                         eeprom_phy_id  = (id1 >> 16) << 10;
8537                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8538                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8539                 } else
8540                         eeprom_phy_id = 0;
8541
8542                 tp->phy_id = eeprom_phy_id;
8543                 if (eeprom_phy_serdes)
8544                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8545
8546                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8547                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8548                                     SHASTA_EXT_LED_MODE_MASK);
8549                 else
8550                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8551
8552                 switch (led_cfg) {
8553                 default:
8554                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8555                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8556                         break;
8557
8558                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8559                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8560                         break;
8561
8562                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8563                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8564
8565                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8566                          * read on some older 5700/5701 bootcode.
8567                          */
8568                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8569                             ASIC_REV_5700 ||
8570                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8571                             ASIC_REV_5701)
8572                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8573
8574                         break;
8575
8576                 case SHASTA_EXT_LED_SHARED:
8577                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8578                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8579                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8580                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8581                                                  LED_CTRL_MODE_PHY_2);
8582                         break;
8583
8584                 case SHASTA_EXT_LED_MAC:
8585                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8586                         break;
8587
8588                 case SHASTA_EXT_LED_COMBO:
8589                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8590                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8591                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8592                                                  LED_CTRL_MODE_PHY_2);
8593                         break;
8594
8595                 };
8596
8597                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8598                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8599                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8600                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8601
8602                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8603                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8604                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8605                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8606
8607                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8608                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8609                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8610                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8611                 }
8612                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8613                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8614
8615                 if (cfg2 & (1 << 17))
8616                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8617
8618                 /* serdes signal pre-emphasis in register 0x590 set by */
8619                 /* bootcode if bit 18 is set */
8620                 if (cfg2 & (1 << 18))
8621                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8622         }
8623 }
8624
8625 static int __devinit tg3_phy_probe(struct tg3 *tp)
8626 {
8627         u32 hw_phy_id_1, hw_phy_id_2;
8628         u32 hw_phy_id, hw_phy_id_masked;
8629         int err;
8630
8631         /* Reading the PHY ID register can conflict with ASF
8632          * firwmare access to the PHY hardware.
8633          */
8634         err = 0;
8635         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8636                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8637         } else {
8638                 /* Now read the physical PHY_ID from the chip and verify
8639                  * that it is sane.  If it doesn't look good, we fall back
8640                  * to either the hard-coded table based PHY_ID and failing
8641                  * that the value found in the eeprom area.
8642                  */
8643                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8644                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8645
8646                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8647                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8648                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8649
8650                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8651         }
8652
8653         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8654                 tp->phy_id = hw_phy_id;
8655                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8656                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8657         } else {
8658                 if (tp->phy_id != PHY_ID_INVALID) {
8659                         /* Do nothing, phy ID already set up in
8660                          * tg3_get_eeprom_hw_cfg().
8661                          */
8662                 } else {
8663                         struct subsys_tbl_ent *p;
8664
8665                         /* No eeprom signature?  Try the hardcoded
8666                          * subsys device table.
8667                          */
8668                         p = lookup_by_subsys(tp);
8669                         if (!p)
8670                                 return -ENODEV;
8671
8672                         tp->phy_id = p->phy_id;
8673                         if (!tp->phy_id ||
8674                             tp->phy_id == PHY_ID_BCM8002)
8675                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8676                 }
8677         }
8678
8679         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8680             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8681                 u32 bmsr, adv_reg, tg3_ctrl;
8682
8683                 tg3_readphy(tp, MII_BMSR, &bmsr);
8684                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8685                     (bmsr & BMSR_LSTATUS))
8686                         goto skip_phy_reset;
8687                     
8688                 err = tg3_phy_reset(tp);
8689                 if (err)
8690                         return err;
8691
8692                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8693                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8694                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8695                 tg3_ctrl = 0;
8696                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8697                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8698                                     MII_TG3_CTRL_ADV_1000_FULL);
8699                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8700                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8701                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8702                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8703                 }
8704
8705                 if (!tg3_copper_is_advertising_all(tp)) {
8706                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8707
8708                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8709                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8710
8711                         tg3_writephy(tp, MII_BMCR,
8712                                      BMCR_ANENABLE | BMCR_ANRESTART);
8713                 }
8714                 tg3_phy_set_wirespeed(tp);
8715
8716                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8717                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8718                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8719         }
8720
8721 skip_phy_reset:
8722         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8723                 err = tg3_init_5401phy_dsp(tp);
8724                 if (err)
8725                         return err;
8726         }
8727
8728         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8729                 err = tg3_init_5401phy_dsp(tp);
8730         }
8731
8732         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8733                 tp->link_config.advertising =
8734                         (ADVERTISED_1000baseT_Half |
8735                          ADVERTISED_1000baseT_Full |
8736                          ADVERTISED_Autoneg |
8737                          ADVERTISED_FIBRE);
8738         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8739                 tp->link_config.advertising &=
8740                         ~(ADVERTISED_1000baseT_Half |
8741                           ADVERTISED_1000baseT_Full);
8742
8743         return err;
8744 }
8745
8746 static void __devinit tg3_read_partno(struct tg3 *tp)
8747 {
8748         unsigned char vpd_data[256];
8749         int i;
8750
8751         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8752                 /* Sun decided not to put the necessary bits in the
8753                  * NVRAM of their onboard tg3 parts :(
8754                  */
8755                 strcpy(tp->board_part_number, "Sun 570X");
8756                 return;
8757         }
8758
8759         for (i = 0; i < 256; i += 4) {
8760                 u32 tmp;
8761
8762                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8763                         goto out_not_found;
8764
8765                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8766                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8767                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8768                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8769         }
8770
8771         /* Now parse and find the part number. */
8772         for (i = 0; i < 256; ) {
8773                 unsigned char val = vpd_data[i];
8774                 int block_end;
8775
8776                 if (val == 0x82 || val == 0x91) {
8777                         i = (i + 3 +
8778                              (vpd_data[i + 1] +
8779                               (vpd_data[i + 2] << 8)));
8780                         continue;
8781                 }
8782
8783                 if (val != 0x90)
8784                         goto out_not_found;
8785
8786                 block_end = (i + 3 +
8787                              (vpd_data[i + 1] +
8788                               (vpd_data[i + 2] << 8)));
8789                 i += 3;
8790                 while (i < block_end) {
8791                         if (vpd_data[i + 0] == 'P' &&
8792                             vpd_data[i + 1] == 'N') {
8793                                 int partno_len = vpd_data[i + 2];
8794
8795                                 if (partno_len > 24)
8796                                         goto out_not_found;
8797
8798                                 memcpy(tp->board_part_number,
8799                                        &vpd_data[i + 3],
8800                                        partno_len);
8801
8802                                 /* Success. */
8803                                 return;
8804                         }
8805                 }
8806
8807                 /* Part number not found. */
8808                 goto out_not_found;
8809         }
8810
8811 out_not_found:
8812         strcpy(tp->board_part_number, "none");
8813 }
8814
8815 #ifdef CONFIG_SPARC64
8816 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8817 {
8818         struct pci_dev *pdev = tp->pdev;
8819         struct pcidev_cookie *pcp = pdev->sysdata;
8820
8821         if (pcp != NULL) {
8822                 int node = pcp->prom_node;
8823                 u32 venid;
8824                 int err;
8825
8826                 err = prom_getproperty(node, "subsystem-vendor-id",
8827                                        (char *) &venid, sizeof(venid));
8828                 if (err == 0 || err == -1)
8829                         return 0;
8830                 if (venid == PCI_VENDOR_ID_SUN)
8831                         return 1;
8832         }
8833         return 0;
8834 }
8835 #endif
8836
8837 static int __devinit tg3_get_invariants(struct tg3 *tp)
8838 {
8839         static struct pci_device_id write_reorder_chipsets[] = {
8840                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8841                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8842                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8843                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8844                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8845                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8846                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8847                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8848                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8849                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8850                 { },
8851         };
8852         u32 misc_ctrl_reg;
8853         u32 cacheline_sz_reg;
8854         u32 pci_state_reg, grc_misc_cfg;
8855         u32 val;
8856         u16 pci_cmd;
8857         int err;
8858
8859 #ifdef CONFIG_SPARC64
8860         if (tg3_is_sun_570X(tp))
8861                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8862 #endif
8863
8864         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8865          * reordering to the mailbox registers done by the host
8866          * controller can cause major troubles.  We read back from
8867          * every mailbox register write to force the writes to be
8868          * posted to the chip in order.
8869          */
8870         if (pci_dev_present(write_reorder_chipsets))
8871                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8872
8873         /* Force memory write invalidate off.  If we leave it on,
8874          * then on 5700_BX chips we have to enable a workaround.
8875          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8876          * to match the cacheline size.  The Broadcom driver have this
8877          * workaround but turns MWI off all the times so never uses
8878          * it.  This seems to suggest that the workaround is insufficient.
8879          */
8880         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8881         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8882         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8883
8884         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8885          * has the register indirect write enable bit set before
8886          * we try to access any of the MMIO registers.  It is also
8887          * critical that the PCI-X hw workaround situation is decided
8888          * before that as well.
8889          */
8890         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8891                               &misc_ctrl_reg);
8892
8893         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8894                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8895
8896         /* Wrong chip ID in 5752 A0. This code can be removed later
8897          * as A0 is not in production.
8898          */
8899         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8900                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8901
8902         /* Initialize misc host control in PCI block. */
8903         tp->misc_host_ctrl |= (misc_ctrl_reg &
8904                                MISC_HOST_CTRL_CHIPREV);
8905         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8906                                tp->misc_host_ctrl);
8907
8908         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8909                               &cacheline_sz_reg);
8910
8911         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8912         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8913         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8914         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8915
8916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8918                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8919
8920         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8921             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8922                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8923
8924         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8925                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8926
8927         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8928                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8929
8930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8931             tp->pci_lat_timer < 64) {
8932                 tp->pci_lat_timer = 64;
8933
8934                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8935                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8936                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8937                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8938
8939                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8940                                        cacheline_sz_reg);
8941         }
8942
8943         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8944                               &pci_state_reg);
8945
8946         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8947                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8948
8949                 /* If this is a 5700 BX chipset, and we are in PCI-X
8950                  * mode, enable register write workaround.
8951                  *
8952                  * The workaround is to use indirect register accesses
8953                  * for all chip writes not to mailbox registers.
8954                  */
8955                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8956                         u32 pm_reg;
8957                         u16 pci_cmd;
8958
8959                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8960
8961                         /* The chip can have it's power management PCI config
8962                          * space registers clobbered due to this bug.
8963                          * So explicitly force the chip into D0 here.
8964                          */
8965                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8966                                               &pm_reg);
8967                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8968                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8969                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8970                                                pm_reg);
8971
8972                         /* Also, force SERR#/PERR# in PCI command. */
8973                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8974                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8975                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8976                 }
8977         }
8978
8979         /* Back to back register writes can cause problems on this chip,
8980          * the workaround is to read back all reg writes except those to
8981          * mailbox regs.  See tg3_write_indirect_reg32().
8982          *
8983          * PCI Express 5750_A0 rev chips need this workaround too.
8984          */
8985         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8986             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8987              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8988                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8989
8990         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8991                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8992         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8993                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8994
8995         /* Chip-specific fixup from Broadcom driver */
8996         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8997             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8998                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8999                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9000         }
9001
9002         /* Get eeprom hw config before calling tg3_set_power_state().
9003          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9004          * determined before calling tg3_set_power_state() so that
9005          * we know whether or not to switch out of Vaux power.
9006          * When the flag is set, it means that GPIO1 is used for eeprom
9007          * write protect and also implies that it is a LOM where GPIOs
9008          * are not used to switch power.
9009          */ 
9010         tg3_get_eeprom_hw_cfg(tp);
9011
9012         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9013          * GPIO1 driven high will bring 5700's external PHY out of reset.
9014          * It is also used as eeprom write protect on LOMs.
9015          */
9016         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9017         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9018             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9019                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9020                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9021         /* Unused GPIO3 must be driven as output on 5752 because there
9022          * are no pull-up resistors on unused GPIO pins.
9023          */
9024         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9025                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9026
9027         /* Force the chip into D0. */
9028         err = tg3_set_power_state(tp, 0);
9029         if (err) {
9030                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9031                        pci_name(tp->pdev));
9032                 return err;
9033         }
9034
9035         /* 5700 B0 chips do not support checksumming correctly due
9036          * to hardware bugs.
9037          */
9038         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9039                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9040
9041         /* Pseudo-header checksum is done by hardware logic and not
9042          * the offload processers, so make the chip do the pseudo-
9043          * header checksums on receive.  For transmit it is more
9044          * convenient to do the pseudo-header checksum in software
9045          * as Linux does that on transmit for us in all cases.
9046          */
9047         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9048         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9049
9050         /* Derive initial jumbo mode from MTU assigned in
9051          * ether_setup() via the alloc_etherdev() call
9052          */
9053         if (tp->dev->mtu > ETH_DATA_LEN)
9054                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
9055
9056         /* Determine WakeOnLan speed to use. */
9057         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9058             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9059             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9060             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9061                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9062         } else {
9063                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9064         }
9065
9066         /* A few boards don't want Ethernet@WireSpeed phy feature */
9067         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9068             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9069              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9070              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
9071                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9072
9073         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9074             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9075                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9076         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9077                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9078
9079         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9080                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9081
9082         tp->coalesce_mode = 0;
9083         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9084             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9085                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9086
9087         /* Initialize MAC MI mode, polling disabled. */
9088         tw32_f(MAC_MI_MODE, tp->mi_mode);
9089         udelay(80);
9090
9091         /* Initialize data/descriptor byte/word swapping. */
9092         val = tr32(GRC_MODE);
9093         val &= GRC_MODE_HOST_STACKUP;
9094         tw32(GRC_MODE, val | tp->grc_mode);
9095
9096         tg3_switch_clocks(tp);
9097
9098         /* Clear this out for sanity. */
9099         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9100
9101         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9102                               &pci_state_reg);
9103         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9104             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9105                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9106
9107                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9108                     chiprevid == CHIPREV_ID_5701_B0 ||
9109                     chiprevid == CHIPREV_ID_5701_B2 ||
9110                     chiprevid == CHIPREV_ID_5701_B5) {
9111                         void __iomem *sram_base;
9112
9113                         /* Write some dummy words into the SRAM status block
9114                          * area, see if it reads back correctly.  If the return
9115                          * value is bad, force enable the PCIX workaround.
9116                          */
9117                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9118
9119                         writel(0x00000000, sram_base);
9120                         writel(0x00000000, sram_base + 4);
9121                         writel(0xffffffff, sram_base + 4);
9122                         if (readl(sram_base) != 0x00000000)
9123                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9124                 }
9125         }
9126
9127         udelay(50);
9128         tg3_nvram_init(tp);
9129
9130         grc_misc_cfg = tr32(GRC_MISC_CFG);
9131         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9132
9133         /* Broadcom's driver says that CIOBE multisplit has a bug */
9134 #if 0
9135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9136             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9137                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9138                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9139         }
9140 #endif
9141         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9142             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9143              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9144                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9145
9146         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9147             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9148                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9149         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9150                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9151                                       HOSTCC_MODE_CLRTICK_TXBD);
9152
9153                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9154                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9155                                        tp->misc_host_ctrl);
9156         }
9157
9158         /* these are limited to 10/100 only */
9159         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9160              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9161             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9162              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9163              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9164               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9165               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9166             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9167              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9168               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9169                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9170
9171         err = tg3_phy_probe(tp);
9172         if (err) {
9173                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9174                        pci_name(tp->pdev), err);
9175                 /* ... but do not return immediately ... */
9176         }
9177
9178         tg3_read_partno(tp);
9179
9180         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9181                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9182         } else {
9183                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9184                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9185                 else
9186                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9187         }
9188
9189         /* 5700 {AX,BX} chips have a broken status block link
9190          * change bit implementation, so we must use the
9191          * status register in those cases.
9192          */
9193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9194                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9195         else
9196                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9197
9198         /* The led_ctrl is set during tg3_phy_probe, here we might
9199          * have to force the link status polling mechanism based
9200          * upon subsystem IDs.
9201          */
9202         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9203             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9204                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9205                                   TG3_FLAG_USE_LINKCHG_REG);
9206         }
9207
9208         /* For all SERDES we poll the MAC status register. */
9209         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9210                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9211         else
9212                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9213
9214         /* 5700 BX chips need to have their TX producer index mailboxes
9215          * written twice to workaround a bug.
9216          */
9217         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9218                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9219         else
9220                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9221
9222         /* It seems all chips can get confused if TX buffers
9223          * straddle the 4GB address boundary in some cases.
9224          */
9225         tp->dev->hard_start_xmit = tg3_start_xmit;
9226
9227         tp->rx_offset = 2;
9228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9229             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9230                 tp->rx_offset = 0;
9231
9232         /* By default, disable wake-on-lan.  User can change this
9233          * using ETHTOOL_SWOL.
9234          */
9235         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9236
9237         return err;
9238 }
9239
9240 #ifdef CONFIG_SPARC64
9241 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9242 {
9243         struct net_device *dev = tp->dev;
9244         struct pci_dev *pdev = tp->pdev;
9245         struct pcidev_cookie *pcp = pdev->sysdata;
9246
9247         if (pcp != NULL) {
9248                 int node = pcp->prom_node;
9249
9250                 if (prom_getproplen(node, "local-mac-address") == 6) {
9251                         prom_getproperty(node, "local-mac-address",
9252                                          dev->dev_addr, 6);
9253                         return 0;
9254                 }
9255         }
9256         return -ENODEV;
9257 }
9258
9259 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9260 {
9261         struct net_device *dev = tp->dev;
9262
9263         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9264         return 0;
9265 }
9266 #endif
9267
9268 static int __devinit tg3_get_device_address(struct tg3 *tp)
9269 {
9270         struct net_device *dev = tp->dev;
9271         u32 hi, lo, mac_offset;
9272
9273 #ifdef CONFIG_SPARC64
9274         if (!tg3_get_macaddr_sparc(tp))
9275                 return 0;
9276 #endif
9277
9278         mac_offset = 0x7c;
9279         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9280             !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
9281                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9282                         mac_offset = 0xcc;
9283                 if (tg3_nvram_lock(tp))
9284                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9285                 else
9286                         tg3_nvram_unlock(tp);
9287         }
9288
9289         /* First try to get it from MAC address mailbox. */
9290         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9291         if ((hi >> 16) == 0x484b) {
9292                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9293                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9294
9295                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9296                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9297                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9298                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9299                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9300         }
9301         /* Next, try NVRAM. */
9302         else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9303                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9304                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9305                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9306                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9307                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9308                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9309                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9310                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9311         }
9312         /* Finally just fetch it out of the MAC control regs. */
9313         else {
9314                 hi = tr32(MAC_ADDR_0_HIGH);
9315                 lo = tr32(MAC_ADDR_0_LOW);
9316
9317                 dev->dev_addr[5] = lo & 0xff;
9318                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9319                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9320                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9321                 dev->dev_addr[1] = hi & 0xff;
9322                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9323         }
9324
9325         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9326 #ifdef CONFIG_SPARC64
9327                 if (!tg3_get_default_macaddr_sparc(tp))
9328                         return 0;
9329 #endif
9330                 return -EINVAL;
9331         }
9332         return 0;
9333 }
9334
9335 #define BOUNDARY_SINGLE_CACHELINE       1
9336 #define BOUNDARY_MULTI_CACHELINE        2
9337
9338 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9339 {
9340         int cacheline_size;
9341         u8 byte;
9342         int goal;
9343
9344         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9345         if (byte == 0)
9346                 cacheline_size = 1024;
9347         else
9348                 cacheline_size = (int) byte * 4;
9349
9350         /* On 5703 and later chips, the boundary bits have no
9351          * effect.
9352          */
9353         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9354             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9355             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9356                 goto out;
9357
9358 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9359         goal = BOUNDARY_MULTI_CACHELINE;
9360 #else
9361 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9362         goal = BOUNDARY_SINGLE_CACHELINE;
9363 #else
9364         goal = 0;
9365 #endif
9366 #endif
9367
9368         if (!goal)
9369                 goto out;
9370
9371         /* PCI controllers on most RISC systems tend to disconnect
9372          * when a device tries to burst across a cache-line boundary.
9373          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9374          *
9375          * Unfortunately, for PCI-E there are only limited
9376          * write-side controls for this, and thus for reads
9377          * we will still get the disconnects.  We'll also waste
9378          * these PCI cycles for both read and write for chips
9379          * other than 5700 and 5701 which do not implement the
9380          * boundary bits.
9381          */
9382         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9383             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9384                 switch (cacheline_size) {
9385                 case 16:
9386                 case 32:
9387                 case 64:
9388                 case 128:
9389                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9390                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9391                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9392                         } else {
9393                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9394                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9395                         }
9396                         break;
9397
9398                 case 256:
9399                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9400                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9401                         break;
9402
9403                 default:
9404                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9405                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9406                         break;
9407                 };
9408         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9409                 switch (cacheline_size) {
9410                 case 16:
9411                 case 32:
9412                 case 64:
9413                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9414                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9415                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9416                                 break;
9417                         }
9418                         /* fallthrough */
9419                 case 128:
9420                 default:
9421                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9422                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9423                         break;
9424                 };
9425         } else {
9426                 switch (cacheline_size) {
9427                 case 16:
9428                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9429                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9430                                         DMA_RWCTRL_WRITE_BNDRY_16);
9431                                 break;
9432                         }
9433                         /* fallthrough */
9434                 case 32:
9435                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9436                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9437                                         DMA_RWCTRL_WRITE_BNDRY_32);
9438                                 break;
9439                         }
9440                         /* fallthrough */
9441                 case 64:
9442                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9443                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9444                                         DMA_RWCTRL_WRITE_BNDRY_64);
9445                                 break;
9446                         }
9447                         /* fallthrough */
9448                 case 128:
9449                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9450                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9451                                         DMA_RWCTRL_WRITE_BNDRY_128);
9452                                 break;
9453                         }
9454                         /* fallthrough */
9455                 case 256:
9456                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9457                                 DMA_RWCTRL_WRITE_BNDRY_256);
9458                         break;
9459                 case 512:
9460                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9461                                 DMA_RWCTRL_WRITE_BNDRY_512);
9462                         break;
9463                 case 1024:
9464                 default:
9465                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9466                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9467                         break;
9468                 };
9469         }
9470
9471 out:
9472         return val;
9473 }
9474
9475 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9476 {
9477         struct tg3_internal_buffer_desc test_desc;
9478         u32 sram_dma_descs;
9479         int i, ret;
9480
9481         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9482
9483         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9484         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9485         tw32(RDMAC_STATUS, 0);
9486         tw32(WDMAC_STATUS, 0);
9487
9488         tw32(BUFMGR_MODE, 0);
9489         tw32(FTQ_RESET, 0);
9490
9491         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9492         test_desc.addr_lo = buf_dma & 0xffffffff;
9493         test_desc.nic_mbuf = 0x00002100;
9494         test_desc.len = size;
9495
9496         /*
9497          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
9498          * the *second* time the tg3 driver was getting loaded after an
9499          * initial scan.
9500          *
9501          * Broadcom tells me:
9502          *   ...the DMA engine is connected to the GRC block and a DMA
9503          *   reset may affect the GRC block in some unpredictable way...
9504          *   The behavior of resets to individual blocks has not been tested.
9505          *
9506          * Broadcom noted the GRC reset will also reset all sub-components.
9507          */
9508         if (to_device) {
9509                 test_desc.cqid_sqid = (13 << 8) | 2;
9510
9511                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9512                 udelay(40);
9513         } else {
9514                 test_desc.cqid_sqid = (16 << 8) | 7;
9515
9516                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9517                 udelay(40);
9518         }
9519         test_desc.flags = 0x00000005;
9520
9521         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9522                 u32 val;
9523
9524                 val = *(((u32 *)&test_desc) + i);
9525                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9526                                        sram_dma_descs + (i * sizeof(u32)));
9527                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9528         }
9529         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
9530
9531         if (to_device) {
9532                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9533         } else {
9534                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
9535         }
9536
9537         ret = -ENODEV;
9538         for (i = 0; i < 40; i++) {
9539                 u32 val;
9540
9541                 if (to_device)
9542                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9543                 else
9544                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9545                 if ((val & 0xffff) == sram_dma_descs) {
9546                         ret = 0;
9547                         break;
9548                 }
9549
9550                 udelay(100);
9551         }
9552
9553         return ret;
9554 }
9555
9556 #define TEST_BUFFER_SIZE        0x2000
9557
9558 static int __devinit tg3_test_dma(struct tg3 *tp)
9559 {
9560         dma_addr_t buf_dma;
9561         u32 *buf, saved_dma_rwctrl;
9562         int ret;
9563
9564         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9565         if (!buf) {
9566                 ret = -ENOMEM;
9567                 goto out_nofree;
9568         }
9569
9570         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9571                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9572
9573         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9574
9575         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9576                 /* DMA read watermark not used on PCIE */
9577                 tp->dma_rwctrl |= 0x00180000;
9578         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9579                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9580                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9581                         tp->dma_rwctrl |= 0x003f0000;
9582                 else
9583                         tp->dma_rwctrl |= 0x003f000f;
9584         } else {
9585                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9586                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9587                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9588
9589                         if (ccval == 0x6 || ccval == 0x7)
9590                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9591
9592                         /* Set bit 23 to enable PCIX hw bug fix */
9593                         tp->dma_rwctrl |= 0x009f0000;
9594                 } else {
9595                         tp->dma_rwctrl |= 0x001b000f;
9596                 }
9597         }
9598
9599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9600             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9601                 tp->dma_rwctrl &= 0xfffffff0;
9602
9603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9604             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9605                 /* Remove this if it causes problems for some boards. */
9606                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9607
9608                 /* On 5700/5701 chips, we need to set this bit.
9609                  * Otherwise the chip will issue cacheline transactions
9610                  * to streamable DMA memory with not all the byte
9611                  * enables turned on.  This is an error on several
9612                  * RISC PCI controllers, in particular sparc64.
9613                  *
9614                  * On 5703/5704 chips, this bit has been reassigned
9615                  * a different meaning.  In particular, it is used
9616                  * on those chips to enable a PCI-X workaround.
9617                  */
9618                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9619         }
9620
9621         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9622
9623 #if 0
9624         /* Unneeded, already done by tg3_get_invariants.  */
9625         tg3_switch_clocks(tp);
9626 #endif
9627
9628         ret = 0;
9629         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9630             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9631                 goto out;
9632
9633         /* It is best to perform DMA test with maximum write burst size
9634          * to expose the 5700/5701 write DMA bug.
9635          */
9636         saved_dma_rwctrl = tp->dma_rwctrl;
9637         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9638         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9639
9640         while (1) {
9641                 u32 *p = buf, i;
9642
9643                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9644                         p[i] = i;
9645
9646                 /* Send the buffer to the chip. */
9647                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9648                 if (ret) {
9649                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
9650                         break;
9651                 }
9652
9653 #if 0
9654                 /* validate data reached card RAM correctly. */
9655                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9656                         u32 val;
9657                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
9658                         if (le32_to_cpu(val) != p[i]) {
9659                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
9660                                 /* ret = -ENODEV here? */
9661                         }
9662                         p[i] = 0;
9663                 }
9664 #endif
9665                 /* Now read it back. */
9666                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
9667                 if (ret) {
9668                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
9669
9670                         break;
9671                 }
9672
9673                 /* Verify it. */
9674                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9675                         if (p[i] == i)
9676                                 continue;
9677
9678                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9679                             DMA_RWCTRL_WRITE_BNDRY_16) {
9680                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9681                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9682                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9683                                 break;
9684                         } else {
9685                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
9686                                 ret = -ENODEV;
9687                                 goto out;
9688                         }
9689                 }
9690
9691                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
9692                         /* Success. */
9693                         ret = 0;
9694                         break;
9695                 }
9696         }
9697         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9698             DMA_RWCTRL_WRITE_BNDRY_16) {
9699                 static struct pci_device_id dma_wait_state_chipsets[] = {
9700                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
9701                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
9702                         { },
9703                 };
9704
9705                 /* DMA test passed without adjusting DMA boundary,
9706                  * now look for chipsets that are known to expose the
9707                  * DMA bug without failing the test.
9708                  */
9709                 if (pci_dev_present(dma_wait_state_chipsets)) {
9710                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9711                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9712                 }
9713                 else
9714                         /* Safe to use the calculated DMA boundary. */
9715                         tp->dma_rwctrl = saved_dma_rwctrl;
9716
9717                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9718         }
9719
9720 out:
9721         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
9722 out_nofree:
9723         return ret;
9724 }
9725
9726 static void __devinit tg3_init_link_config(struct tg3 *tp)
9727 {
9728         tp->link_config.advertising =
9729                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9730                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9731                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
9732                  ADVERTISED_Autoneg | ADVERTISED_MII);
9733         tp->link_config.speed = SPEED_INVALID;
9734         tp->link_config.duplex = DUPLEX_INVALID;
9735         tp->link_config.autoneg = AUTONEG_ENABLE;
9736         netif_carrier_off(tp->dev);
9737         tp->link_config.active_speed = SPEED_INVALID;
9738         tp->link_config.active_duplex = DUPLEX_INVALID;
9739         tp->link_config.phy_is_low_power = 0;
9740         tp->link_config.orig_speed = SPEED_INVALID;
9741         tp->link_config.orig_duplex = DUPLEX_INVALID;
9742         tp->link_config.orig_autoneg = AUTONEG_INVALID;
9743 }
9744
9745 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
9746 {
9747         tp->bufmgr_config.mbuf_read_dma_low_water =
9748                 DEFAULT_MB_RDMA_LOW_WATER;
9749         tp->bufmgr_config.mbuf_mac_rx_low_water =
9750                 DEFAULT_MB_MACRX_LOW_WATER;
9751         tp->bufmgr_config.mbuf_high_water =
9752                 DEFAULT_MB_HIGH_WATER;
9753
9754         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
9755                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
9756         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
9757                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
9758         tp->bufmgr_config.mbuf_high_water_jumbo =
9759                 DEFAULT_MB_HIGH_WATER_JUMBO;
9760
9761         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
9762         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
9763 }
9764
9765 static char * __devinit tg3_phy_string(struct tg3 *tp)
9766 {
9767         switch (tp->phy_id & PHY_ID_MASK) {
9768         case PHY_ID_BCM5400:    return "5400";
9769         case PHY_ID_BCM5401:    return "5401";
9770         case PHY_ID_BCM5411:    return "5411";
9771         case PHY_ID_BCM5701:    return "5701";
9772         case PHY_ID_BCM5703:    return "5703";
9773         case PHY_ID_BCM5704:    return "5704";
9774         case PHY_ID_BCM5705:    return "5705";
9775         case PHY_ID_BCM5750:    return "5750";
9776         case PHY_ID_BCM5752:    return "5752";
9777         case PHY_ID_BCM8002:    return "8002/serdes";
9778         case 0:                 return "serdes";
9779         default:                return "unknown";
9780         };
9781 }
9782
9783 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9784 {
9785         struct pci_dev *peer;
9786         unsigned int func, devnr = tp->pdev->devfn & ~7;
9787
9788         for (func = 0; func < 8; func++) {
9789                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9790                 if (peer && peer != tp->pdev)
9791                         break;
9792                 pci_dev_put(peer);
9793         }
9794         if (!peer || peer == tp->pdev)
9795                 BUG();
9796
9797         /*
9798          * We don't need to keep the refcount elevated; there's no way
9799          * to remove one half of this device without removing the other
9800          */
9801         pci_dev_put(peer);
9802
9803         return peer;
9804 }
9805
9806 static void __devinit tg3_init_coal(struct tg3 *tp)
9807 {
9808         struct ethtool_coalesce *ec = &tp->coal;
9809
9810         memset(ec, 0, sizeof(*ec));
9811         ec->cmd = ETHTOOL_GCOALESCE;
9812         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9813         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9814         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9815         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9816         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9817         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9818         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9819         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9820         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9821
9822         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9823                                  HOSTCC_MODE_CLRTICK_TXBD)) {
9824                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9825                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9826                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9827                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9828         }
9829 }
9830
9831 static int __devinit tg3_init_one(struct pci_dev *pdev,
9832                                   const struct pci_device_id *ent)
9833 {
9834         static int tg3_version_printed = 0;
9835         unsigned long tg3reg_base, tg3reg_len;
9836         struct net_device *dev;
9837         struct tg3 *tp;
9838         int i, err, pci_using_dac, pm_cap;
9839
9840         if (tg3_version_printed++ == 0)
9841                 printk(KERN_INFO "%s", version);
9842
9843         err = pci_enable_device(pdev);
9844         if (err) {
9845                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9846                        "aborting.\n");
9847                 return err;
9848         }
9849
9850         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9851                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9852                        "base address, aborting.\n");
9853                 err = -ENODEV;
9854                 goto err_out_disable_pdev;
9855         }
9856
9857         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9858         if (err) {
9859                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9860                        "aborting.\n");
9861                 goto err_out_disable_pdev;
9862         }
9863
9864         pci_set_master(pdev);
9865
9866         /* Find power-management capability. */
9867         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9868         if (pm_cap == 0) {
9869                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9870                        "aborting.\n");
9871                 err = -EIO;
9872                 goto err_out_free_res;
9873         }
9874
9875         /* Configure DMA attributes. */
9876         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9877         if (!err) {
9878                 pci_using_dac = 1;
9879                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9880                 if (err < 0) {
9881                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9882                                "for consistent allocations\n");
9883                         goto err_out_free_res;
9884                 }
9885         } else {
9886                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9887                 if (err) {
9888                         printk(KERN_ERR PFX "No usable DMA configuration, "
9889                                "aborting.\n");
9890                         goto err_out_free_res;
9891                 }
9892                 pci_using_dac = 0;
9893         }
9894
9895         tg3reg_base = pci_resource_start(pdev, 0);
9896         tg3reg_len = pci_resource_len(pdev, 0);
9897
9898         dev = alloc_etherdev(sizeof(*tp));
9899         if (!dev) {
9900                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9901                 err = -ENOMEM;
9902                 goto err_out_free_res;
9903         }
9904
9905         SET_MODULE_OWNER(dev);
9906         SET_NETDEV_DEV(dev, &pdev->dev);
9907
9908         if (pci_using_dac)
9909                 dev->features |= NETIF_F_HIGHDMA;
9910         dev->features |= NETIF_F_LLTX;
9911 #if TG3_VLAN_TAG_USED
9912         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9913         dev->vlan_rx_register = tg3_vlan_rx_register;
9914         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9915 #endif
9916
9917         tp = netdev_priv(dev);
9918         tp->pdev = pdev;
9919         tp->dev = dev;
9920         tp->pm_cap = pm_cap;
9921         tp->mac_mode = TG3_DEF_MAC_MODE;
9922         tp->rx_mode = TG3_DEF_RX_MODE;
9923         tp->tx_mode = TG3_DEF_TX_MODE;
9924         tp->mi_mode = MAC_MI_MODE_BASE;
9925         if (tg3_debug > 0)
9926                 tp->msg_enable = tg3_debug;
9927         else
9928                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
9929
9930         /* The word/byte swap controls here control register access byte
9931          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
9932          * setting below.
9933          */
9934         tp->misc_host_ctrl =
9935                 MISC_HOST_CTRL_MASK_PCI_INT |
9936                 MISC_HOST_CTRL_WORD_SWAP |
9937                 MISC_HOST_CTRL_INDIR_ACCESS |
9938                 MISC_HOST_CTRL_PCISTATE_RW;
9939
9940         /* The NONFRM (non-frame) byte/word swap controls take effect
9941          * on descriptor entries, anything which isn't packet data.
9942          *
9943          * The StrongARM chips on the board (one for tx, one for rx)
9944          * are running in big-endian mode.
9945          */
9946         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
9947                         GRC_MODE_WSWAP_NONFRM_DATA);
9948 #ifdef __BIG_ENDIAN
9949         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
9950 #endif
9951         spin_lock_init(&tp->lock);
9952         spin_lock_init(&tp->tx_lock);
9953         spin_lock_init(&tp->indirect_lock);
9954         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
9955
9956         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
9957         if (tp->regs == 0UL) {
9958                 printk(KERN_ERR PFX "Cannot map device registers, "
9959                        "aborting.\n");
9960                 err = -ENOMEM;
9961                 goto err_out_free_dev;
9962         }
9963
9964         tg3_init_link_config(tp);
9965
9966         tg3_init_bufmgr_config(tp);
9967
9968         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
9969         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
9970         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
9971
9972         dev->open = tg3_open;
9973         dev->stop = tg3_close;
9974         dev->get_stats = tg3_get_stats;
9975         dev->set_multicast_list = tg3_set_rx_mode;
9976         dev->set_mac_address = tg3_set_mac_addr;
9977         dev->do_ioctl = tg3_ioctl;
9978         dev->tx_timeout = tg3_tx_timeout;
9979         dev->poll = tg3_poll;
9980         dev->ethtool_ops = &tg3_ethtool_ops;
9981         dev->weight = 64;
9982         dev->watchdog_timeo = TG3_TX_TIMEOUT;
9983         dev->change_mtu = tg3_change_mtu;
9984         dev->irq = pdev->irq;
9985 #ifdef CONFIG_NET_POLL_CONTROLLER
9986         dev->poll_controller = tg3_poll_controller;
9987 #endif
9988
9989         err = tg3_get_invariants(tp);
9990         if (err) {
9991                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
9992                        "aborting.\n");
9993                 goto err_out_iounmap;
9994         }
9995
9996         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9997                 tp->bufmgr_config.mbuf_read_dma_low_water =
9998                         DEFAULT_MB_RDMA_LOW_WATER_5705;
9999                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10000                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10001                 tp->bufmgr_config.mbuf_high_water =
10002                         DEFAULT_MB_HIGH_WATER_5705;
10003         }
10004
10005 #if TG3_TSO_SUPPORT != 0
10006         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10007                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10008         }
10009         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10011             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10012             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10013                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10014         } else {
10015                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10016         }
10017
10018         /* TSO is off by default, user can enable using ethtool.  */
10019 #if 0
10020         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10021                 dev->features |= NETIF_F_TSO;
10022 #endif
10023
10024 #endif
10025
10026         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10027             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10028             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10029                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10030                 tp->rx_pending = 63;
10031         }
10032
10033         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10034                 tp->pdev_peer = tg3_find_5704_peer(tp);
10035
10036         err = tg3_get_device_address(tp);
10037         if (err) {
10038                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10039                        "aborting.\n");
10040                 goto err_out_iounmap;
10041         }
10042
10043         /*
10044          * Reset chip in case UNDI or EFI driver did not shutdown
10045          * DMA self test will enable WDMAC and we'll see (spurious)
10046          * pending DMA on the PCI bus at that point.
10047          */
10048         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10049             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10050                 pci_save_state(tp->pdev);
10051                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10052                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10053         }
10054
10055         err = tg3_test_dma(tp);
10056         if (err) {
10057                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10058                 goto err_out_iounmap;
10059         }
10060
10061         /* Tigon3 can do ipv4 only... and some chips have buggy
10062          * checksumming.
10063          */
10064         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10065                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10066                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10067         } else
10068                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10069
10070         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10071                 dev->features &= ~NETIF_F_HIGHDMA;
10072
10073         /* flow control autonegotiation is default behavior */
10074         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10075
10076         tg3_init_coal(tp);
10077
10078         err = register_netdev(dev);
10079         if (err) {
10080                 printk(KERN_ERR PFX "Cannot register net device, "
10081                        "aborting.\n");
10082                 goto err_out_iounmap;
10083         }
10084
10085         pci_set_drvdata(pdev, dev);
10086
10087         /* Now that we have fully setup the chip, save away a snapshot
10088          * of the PCI config space.  We need to restore this after
10089          * GRC_MISC_CFG core clock resets and some resume events.
10090          */
10091         pci_save_state(tp->pdev);
10092
10093         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10094                dev->name,
10095                tp->board_part_number,
10096                tp->pci_chip_rev_id,
10097                tg3_phy_string(tp),
10098                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10099                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10100                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10101                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10102                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10103                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10104
10105         for (i = 0; i < 6; i++)
10106                 printk("%2.2x%c", dev->dev_addr[i],
10107                        i == 5 ? '\n' : ':');
10108
10109         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10110                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10111                "TSOcap[%d] \n",
10112                dev->name,
10113                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10114                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10115                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10116                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10117                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10118                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10119                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10120         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10121                dev->name, tp->dma_rwctrl);
10122
10123         return 0;
10124
10125 err_out_iounmap:
10126         iounmap(tp->regs);
10127
10128 err_out_free_dev:
10129         free_netdev(dev);
10130
10131 err_out_free_res:
10132         pci_release_regions(pdev);
10133
10134 err_out_disable_pdev:
10135         pci_disable_device(pdev);
10136         pci_set_drvdata(pdev, NULL);
10137         return err;
10138 }
10139
10140 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10141 {
10142         struct net_device *dev = pci_get_drvdata(pdev);
10143
10144         if (dev) {
10145                 struct tg3 *tp = netdev_priv(dev);
10146
10147                 unregister_netdev(dev);
10148                 iounmap(tp->regs);
10149                 free_netdev(dev);
10150                 pci_release_regions(pdev);
10151                 pci_disable_device(pdev);
10152                 pci_set_drvdata(pdev, NULL);
10153         }
10154 }
10155
10156 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10157 {
10158         struct net_device *dev = pci_get_drvdata(pdev);
10159         struct tg3 *tp = netdev_priv(dev);
10160         int err;
10161
10162         if (!netif_running(dev))
10163                 return 0;
10164
10165         tg3_netif_stop(tp);
10166
10167         del_timer_sync(&tp->timer);
10168
10169         spin_lock_irq(&tp->lock);
10170         spin_lock(&tp->tx_lock);
10171         tg3_disable_ints(tp);
10172         spin_unlock(&tp->tx_lock);
10173         spin_unlock_irq(&tp->lock);
10174
10175         netif_device_detach(dev);
10176
10177         spin_lock_irq(&tp->lock);
10178         spin_lock(&tp->tx_lock);
10179         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10180         spin_unlock(&tp->tx_lock);
10181         spin_unlock_irq(&tp->lock);
10182
10183         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10184         if (err) {
10185                 spin_lock_irq(&tp->lock);
10186                 spin_lock(&tp->tx_lock);
10187
10188                 tg3_init_hw(tp);
10189
10190                 tp->timer.expires = jiffies + tp->timer_offset;
10191                 add_timer(&tp->timer);
10192
10193                 netif_device_attach(dev);
10194                 tg3_netif_start(tp);
10195
10196                 spin_unlock(&tp->tx_lock);
10197                 spin_unlock_irq(&tp->lock);
10198         }
10199
10200         return err;
10201 }
10202
10203 static int tg3_resume(struct pci_dev *pdev)
10204 {
10205         struct net_device *dev = pci_get_drvdata(pdev);
10206         struct tg3 *tp = netdev_priv(dev);
10207         int err;
10208
10209         if (!netif_running(dev))
10210                 return 0;
10211
10212         pci_restore_state(tp->pdev);
10213
10214         err = tg3_set_power_state(tp, 0);
10215         if (err)
10216                 return err;
10217
10218         netif_device_attach(dev);
10219
10220         spin_lock_irq(&tp->lock);
10221         spin_lock(&tp->tx_lock);
10222
10223         tg3_init_hw(tp);
10224
10225         tp->timer.expires = jiffies + tp->timer_offset;
10226         add_timer(&tp->timer);
10227
10228         tg3_enable_ints(tp);
10229
10230         tg3_netif_start(tp);
10231
10232         spin_unlock(&tp->tx_lock);
10233         spin_unlock_irq(&tp->lock);
10234
10235         return 0;
10236 }
10237
10238 static struct pci_driver tg3_driver = {
10239         .name           = DRV_MODULE_NAME,
10240         .id_table       = tg3_pci_tbl,
10241         .probe          = tg3_init_one,
10242         .remove         = __devexit_p(tg3_remove_one),
10243         .suspend        = tg3_suspend,
10244         .resume         = tg3_resume
10245 };
10246
10247 static int __init tg3_init(void)
10248 {
10249         return pci_module_init(&tg3_driver);
10250 }
10251
10252 static void __exit tg3_cleanup(void)
10253 {
10254         pci_unregister_driver(&tg3_driver);
10255 }
10256
10257 module_init(tg3_init);
10258 module_exit(tg3_cleanup);