Merge to Fedora kernel-2.6.18-1.2255_FC5-vs2.0.2.2-rc9 patched with stable patch...
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.65"
#define DRV_MODULE_RELDATE      "August 07, 2006"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap used when the tg3_debug module parameter
 * is left at -1.
 */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts
 * above; used for the coherent DMA allocations.
 */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Ring sizes are powers of two, so wrap-around is a cheap mask. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* Receive buffer sizes: frame payload plus the chip's RX offset and
 * slack (note: these expand a local 'tp' at the use site).
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
/* Version banner printed once at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
/* PCI IDs of every Broadcom, SysKonnect, Altima and Apple part this
 * driver binds to.  Every entry matches on vendor/device only
 * (PCI_ANY_ID for subsystem IDs); the table is terminated by the
 * all-zero sentinel entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }          /* terminator */
};
264
265 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
266
/* Statistic names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.
 * NOTE(review): the entry order is presumably required to mirror the
 * u64 layout of struct tg3_ethtool_stats (TG3_NUM_STATS is derived
 * from that struct's size) -- confirm against tg3.h before reordering.
 */
static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
347
/* Self-test names reported for ETHTOOL_TEST (TG3_NUM_TEST entries).
 * NOTE(review): order presumably matches the result slots filled by the
 * driver's self-test routine -- verify before reordering.
 */
static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
358
/* Plain MMIO register write (posted; no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
363
364 static u32 tg3_read32(struct tg3 *tp, u32 off)
365 {
366         return (readl(tp->regs + off)); 
367 }
368
/* Write a chip register indirectly through PCI config space: select the
 * target register via TG3PCI_REG_BASE_ADDR, then write the data word.
 * indirect_lock serializes the two-step sequence against concurrent
 * indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
378
/* MMIO write followed by a read of the same register, forcing the
 * posted write out to the chip before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
384
/* Read a chip register indirectly through PCI config space; the
 * address-select/data-read pair is protected by indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
396
/* Write a mailbox register without MMIO.  The two hot RX mailboxes have
 * dedicated PCI config-space aliases and take the fast path; all other
 * mailboxes go through the generic indirect register window (mailboxes
 * live at offset 0x5600 in the register address space).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
426
/* Read a mailbox register through the indirect config-space window
 * (mailbox space is at offset 0x5600); serialized by indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
438
439 /* usec_wait specifies the wait time in usec when writing to certain registers
440  * where it is unsafe to read back the register without some delay.
441  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
442  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
443  */
/* Core of tw32_f()/tw32_wait_f(): write a register and make sure it has
 * reached the chip, optionally waiting usec_wait microseconds (see the
 * comment above for why some registers need the delay).
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method: write direct MMIO, then read back to
                 * flush the posted write.
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
463
/* Mailbox write with flush: read the mailbox back to push the write out,
 * except on chipsets where mailbox read-back is unsafe or unnecessary
 * (write-reorder workaround or ICH workaround active).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
471
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips that may reorder mailbox writes need a
 * read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
481
/* Register/mailbox accessor shorthands.  Going through the tp->write32*
 * / tp->read32* function pointers lets the driver substitute direct,
 * flushed, or config-space-indirect implementations per chip quirk.
 * All of these expand a local 'tp' at the use site.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
492
/* Write a word into NIC on-board SRAM through the memory window,
 * either via PCI config space (SRAM_USE_CONFIG chips) or via MMIO.
 * The window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
513
/* Read a word from NIC on-board SRAM through the memory window
 * (config-space or MMIO variant, mirroring tg3_write_mem); the window
 * base is always restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
534
/* Disable chip interrupts: mask the PCI interrupt line in the misc host
 * control register, then write 1 to the interrupt mailbox to tell the
 * chip interrupts are blocked.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
541
/* If the status block was updated while interrupts were off (and we are
 * not using tagged status), force an interrupt via GRC local control so
 * the pending work gets serviced.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
548
/* Re-enable chip interrupts: clear irq_sync (with a barrier so the
 * store is visible before the unmask), unmask the PCI interrupt, and
 * acknowledge up to last_tag in the interrupt mailbox.  1-shot MSI
 * chips need the mailbox written twice.  Finally force an interrupt if
 * status work accumulated while we were off.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
563
564 static inline unsigned int tg3_has_work(struct tg3 *tp)
565 {
566         struct tg3_hw_status *sblk = tp->hw_status;
567         unsigned int work_exists = 0;
568
569         /* check for phy events */
570         if (!(tp->tg3_flags &
571               (TG3_FLAG_USE_LINKCHG_REG |
572                TG3_FLAG_POLL_SERDES))) {
573                 if (sblk->status & SD_STATUS_LINK_CHG)
574                         work_exists = 1;
575         }
576         /* check for RX/TX work to do */
577         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
578             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
579                 work_exists = 1;
580
581         return work_exists;
582 }
583
/* tg3_restart_ints
 *  Similar to tg3_enable_ints, but it accurately determines whether
 *  there is new work pending and can return without flushing the PIO
 *  write which re-enables interrupts.
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write before any later MMIO writes. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
604
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then disable polling (NAPI)
 * and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}
611
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable polling, mark the status block updated so any pending work
 * is noticed, and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
623
/* Restore the chip's core clock selection.  No-op on 5780-class parts.
 * Every write goes through tw32_wait_f() because clock changes are
 * unsafe to read back without a delay (see _tw32_flush).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Keep only the CLKRUN control bits and the low 5 bits. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Transition via ALTCLK in two steps before the final
                 * write below drops both 44MHZ_CORE and ALTCLK.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
654
/* Maximum number of 10us polls of MI_COM_BUSY before giving up. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register 'reg' via the MAC's MII management interface.
 * Hardware autopolling of the PHY is temporarily disabled around the
 * transaction (and restored afterwards) so it cannot collide with our
 * use of MAC_MI_COM.  Returns 0 with the value in *val, or -EBUSY if
 * the interface never went idle.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the interface reports idle, then re-read for the
         * final data word.
         */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore autopolling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
705
/* Write 'val' to PHY register 'reg' via the MAC's MII management
 * interface.  Autopolling is suspended around the transaction exactly
 * as in tg3_readphy().  Returns 0 on success, -EBUSY if the interface
 * never went idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore autopolling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
750
/* Enable the PHY's "ethernet wirespeed" feature unless the chip is
 * flagged as not supporting it.  Read-modify-write of the PHY auxiliary
 * control register; 0x7007 presumably selects the shadow page holding
 * the wirespeed bits (bits 15 and 4 set below) -- confirm against the
 * Broadcom PHY documentation.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
763
764 static int tg3_bmcr_reset(struct tg3 *tp)
765 {
766         u32 phy_control;
767         int limit, err;
768
769         /* OK, reset it, and poll the BMCR_RESET bit until it
770          * clears or we time out.
771          */
772         phy_control = BMCR_RESET;
773         err = tg3_writephy(tp, MII_BMCR, phy_control);
774         if (err != 0)
775                 return -EBUSY;
776
777         limit = 5000;
778         while (limit--) {
779                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
780                 if (err != 0)
781                         return -EBUSY;
782
783                 if ((phy_control & BMCR_RESET) == 0) {
784                         udelay(40);
785                         break;
786                 }
787                 udelay(10);
788         }
789         if (limit <= 0)
790                 return -EBUSY;
791
792         return 0;
793 }
794
795 static int tg3_wait_macro_done(struct tg3 *tp)
796 {
797         int limit = 100;
798
799         while (limit--) {
800                 u32 tmp32;
801
802                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
803                         if ((tmp32 & 0x1000) == 0)
804                                 break;
805                 }
806         }
807         if (limit <= 0)
808                 return -EBUSY;
809
810         return 0;
811 }
812
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify DSP operation.  On any macro timeout,
 * *resetp is set to 1 so the caller retries after another PHY reset.
 * On a data miscompare, a DSP fix-up (address 0x000b <- 0x4001,
 * 0x4005) is programmed before failing.
 *
 * Returns 0 if all channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's DSP block and arm it for
		 * pattern writes (0x16 <- 0x0002).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Write the six pattern words through the RW port. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the pattern and wait for the macro. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel, switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back as low/high word pairs and
		 * compare (masked) against what was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Miscompare: program the DSP
				 * workaround values before giving up.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
878
879 static int tg3_phy_reset_chanpat(struct tg3 *tp)
880 {
881         int chan;
882
883         for (chan = 0; chan < 4; chan++) {
884                 int i;
885
886                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
887                              (chan * 0x2000) | 0x0200);
888                 tg3_writephy(tp, 0x16, 0x0002);
889                 for (i = 0; i < 6; i++)
890                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
891                 tg3_writephy(tp, 0x16, 0x0202);
892                 if (tg3_wait_macro_done(tp))
893                         return -EBUSY;
894         }
895
896         return 0;
897 }
898
/* Extended PHY reset sequence for 5703/5704/5705: reset the PHY,
 * force 1000/full in master mode, then write and verify a DSP test
 * pattern, retrying with fresh PHY resets up to 10 times.  Finally
 * clear the pattern and restore the saved 1000BaseT control and
 * extended control settings.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	/* NOTE(review): if every retry iteration hits one of the
	 * "continue" paths below, err and phy9_orig are used after
	 * the loop without having been assigned — TODO confirm this
	 * cannot happen in practice.
	 */
	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode (saving the original 1000BaseT
		 * control value for restoration below).
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test pattern out of the DSP channels. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock the PHY control access blocked above. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the 1000BaseT control register saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits
	 * set at the top of the loop).
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
974
975 static void tg3_link_report(struct tg3 *);
976
/* Reset the tigon3 PHY.  5703/5704/5705 chips go through the
 * extended DSP reset sequence; everything else gets a plain BMCR
 * reset.  Afterwards, apply the per-chip PHY bug workarounds, set
 * the jumbo-frame related bits, and re-enable wirespeed.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice back-to-back; presumably because its
	 * status bits are latched, so the first read can return
	 * stale state — standard MII behavior, TODO confirm.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Report the link drop before resetting. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the extended DSP reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* ADC bug workaround: DSP register fix-ups under SM_DSP
	 * clock (0x0c00 enables it, 0x0400 restores).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* 5704 A0 workaround: the same value is deliberately written
	 * twice to register 0x1c.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	/* Bit-error-rate bug workaround. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Jitter bug workaround (mutually exclusive with BER bug). */
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1065
/* Program the GRC_LOCAL_CTRL GPIO pins that control auxiliary
 * power.  On dual-port devices (5704/5714) the peer port's WOL/ASF
 * flags are consulted too, since aux power must stay available if
 * either port needs it, and only one port should drive the shared
 * GPIOs.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Leave the GPIOs alone on write-protected (LOM-style) boards. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* WOL or ASF is active on one of the ports: keep aux
		 * power available.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* GPIOs are shared between ports; defer to the
			 * peer if it has already completed init.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Raise GPIO0 after the other pins settle. */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Aux power not needed.  On chips other than 5700/5701,
		 * toggle GPIO1 (set, clear, set OUTPUT1) — presumably
		 * to switch aux power off.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1161
1162 static int tg3_setup_phy(struct tg3 *, int);
1163
1164 #define RESET_KIND_SHUTDOWN     0
1165 #define RESET_KIND_INIT         1
1166 #define RESET_KIND_SUSPEND      2
1167
1168 static void tg3_write_sig_post_reset(struct tg3 *, int);
1169 static int tg3_halt_cpu(struct tg3 *, u32);
1170 static int tg3_nvram_lock(struct tg3 *);
1171 static void tg3_nvram_unlock(struct tg3 *);
1172
1173 static void tg3_power_down_phy(struct tg3 *tp)
1174 {
1175         /* The PHY should not be powered down on some chips because
1176          * of bugs.
1177          */
1178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1180             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1181              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1182                 return;
1183         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1184 }
1185
/* Transition the chip to the given PCI power state.
 *
 * D0 simply powers the device up (switching off Vaux for non-LOM
 * boards) and returns.  For D1/D2/D3hot the function masks PCI
 * interrupts, forces the copper link down to 10/half, sets up the
 * WOL firmware mailbox and MAC, slows or gates the core clocks per
 * chip family, powers down the PHY when neither WOL nor ASF needs
 * it, arranges aux power, and finally writes the new state to the
 * PM control register.
 *
 * Returns 0 on success, -EINVAL for an invalid target state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Writing PME_STATUS back clears it (RW1C per the PCI PM
	 * spec); then clear the power-state field for the switch.
	 */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while in a low-power state. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper only: renegotiate down to 10/half to save power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, poll the firmware status mailbox (up to
	 * ~200 ms) before writing the WOL mailbox.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware we are shutting down with WOL armed. */
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep the MAC running so magic packets can be
		 * received while suspended.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection only if PME from
		 * D3cold is supported.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating/slow-down, chosen per chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the new clock bits in two steps. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Neither WOL nor ASF needs the PHY: power it off. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU while holding the NVRAM lock. */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1396
1397 static void tg3_link_report(struct tg3 *tp)
1398 {
1399         if (!netif_carrier_ok(tp->dev)) {
1400                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1401         } else {
1402                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1403                        tp->dev->name,
1404                        (tp->link_config.active_speed == SPEED_1000 ?
1405                         1000 :
1406                         (tp->link_config.active_speed == SPEED_100 ?
1407                          100 : 10)),
1408                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1409                         "full" : "half"));
1410
1411                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1412                        "%s for RX.\n",
1413                        tp->dev->name,
1414                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1415                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1416         }
1417 }
1418
1419 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1420 {
1421         u32 new_tg3_flags = 0;
1422         u32 old_rx_mode = tp->rx_mode;
1423         u32 old_tx_mode = tp->tx_mode;
1424
1425         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1426
1427                 /* Convert 1000BaseX flow control bits to 1000BaseT
1428                  * bits before resolving flow control.
1429                  */
1430                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1431                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1432                                        ADVERTISE_PAUSE_ASYM);
1433                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1434
1435                         if (local_adv & ADVERTISE_1000XPAUSE)
1436                                 local_adv |= ADVERTISE_PAUSE_CAP;
1437                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1438                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1439                         if (remote_adv & LPA_1000XPAUSE)
1440                                 remote_adv |= LPA_PAUSE_CAP;
1441                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1442                                 remote_adv |= LPA_PAUSE_ASYM;
1443                 }
1444
1445                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1446                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1447                                 if (remote_adv & LPA_PAUSE_CAP)
1448                                         new_tg3_flags |=
1449                                                 (TG3_FLAG_RX_PAUSE |
1450                                                 TG3_FLAG_TX_PAUSE);
1451                                 else if (remote_adv & LPA_PAUSE_ASYM)
1452                                         new_tg3_flags |=
1453                                                 (TG3_FLAG_RX_PAUSE);
1454                         } else {
1455                                 if (remote_adv & LPA_PAUSE_CAP)
1456                                         new_tg3_flags |=
1457                                                 (TG3_FLAG_RX_PAUSE |
1458                                                 TG3_FLAG_TX_PAUSE);
1459                         }
1460                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1461                         if ((remote_adv & LPA_PAUSE_CAP) &&
1462                         (remote_adv & LPA_PAUSE_ASYM))
1463                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1464                 }
1465
1466                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1467                 tp->tg3_flags |= new_tg3_flags;
1468         } else {
1469                 new_tg3_flags = tp->tg3_flags;
1470         }
1471
1472         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1473                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1474         else
1475                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1476
1477         if (old_rx_mode != tp->rx_mode) {
1478                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1479         }
1480         
1481         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1482                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1483         else
1484                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1485
1486         if (old_tx_mode != tp->tx_mode) {
1487                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1488         }
1489 }
1490
1491 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1492 {
1493         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1494         case MII_TG3_AUX_STAT_10HALF:
1495                 *speed = SPEED_10;
1496                 *duplex = DUPLEX_HALF;
1497                 break;
1498
1499         case MII_TG3_AUX_STAT_10FULL:
1500                 *speed = SPEED_10;
1501                 *duplex = DUPLEX_FULL;
1502                 break;
1503
1504         case MII_TG3_AUX_STAT_100HALF:
1505                 *speed = SPEED_100;
1506                 *duplex = DUPLEX_HALF;
1507                 break;
1508
1509         case MII_TG3_AUX_STAT_100FULL:
1510                 *speed = SPEED_100;
1511                 *duplex = DUPLEX_FULL;
1512                 break;
1513
1514         case MII_TG3_AUX_STAT_1000HALF:
1515                 *speed = SPEED_1000;
1516                 *duplex = DUPLEX_HALF;
1517                 break;
1518
1519         case MII_TG3_AUX_STAT_1000FULL:
1520                 *speed = SPEED_1000;
1521                 *duplex = DUPLEX_FULL;
1522                 break;
1523
1524         default:
1525                 *speed = SPEED_INVALID;
1526                 *duplex = DUPLEX_INVALID;
1527                 break;
1528         };
1529 }
1530
/* Program the copper PHY advertisement and control registers to start
 * bringing the link up, based on tp->link_config:
 *   - low-power mode:  advertise only 10Mb (plus 100Mb if WOL needs it),
 *   - autoneg with no forced speed:  advertise every supported mode,
 *   - forced speed/duplex:  advertise exactly the requested mode.
 * Then either force BMCR to the requested mode or restart autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100baseT advertised if Wake-on-LAN needs 100Mb. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything,
		 * minus gigabit on 10/100-only boards.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate the ethtool ADVERTISED_* mask into
		 * MII_ADVERTISE register bits.
		 */
		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 chips are forced to be gigabit master
			 * (chip-revision specific workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10/100 forced: clear gigabit control, advertise
			 * only the single requested mode.
			 */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link by putting the PHY in loopback,
			 * wait (up to 15ms) for link-down, then program
			 * the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched-low; read
				 * twice to get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1669
1670 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1671 {
1672         int err;
1673
1674         /* Turn off tap power management. */
1675         /* Set Extended packet length bit */
1676         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1677
1678         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1679         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1680
1681         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1682         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1683
1684         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1685         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1686
1687         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1689
1690         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1691         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1692
1693         udelay(40);
1694
1695         return err;
1696 }
1697
1698 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1699 {
1700         u32 adv_reg, all_mask;
1701
1702         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1703                 return 0;
1704
1705         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1706                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1707         if ((adv_reg & all_mask) != all_mask)
1708                 return 0;
1709         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1710                 u32 tg3_ctrl;
1711
1712                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1713                         return 0;
1714
1715                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1716                             MII_TG3_CTRL_ADV_1000_FULL);
1717                 if ((tg3_ctrl & all_mask) != all_mask)
1718                         return 0;
1719         }
1720         return 1;
1721 }
1722
/* Bring up (or re-verify) the link on a copper PHY.  Resets the PHY if
 * needed, waits for link, derives speed/duplex from the PHY AUX status
 * register, resolves flow control, and programs the MAC port mode to
 * match.  Reports carrier transitions to the stack.  Returns 0 except
 * when the BCM5401 DSP init / PHY reset recovery path fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear any pending link status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Put the MI interface into its base mode while we talk to the
	 * PHY directly.
	 */
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)program the 5401 DSP, then
			 * wait up to ~10ms for link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* A 5401 rev B0 that still has no link at gigabit
			 * gets a full PHY reset plus DSP re-init.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change PHY interrupt when using MI
	 * interrupts; otherwise mask everything.
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the (shadowed) AUX control register is
		 * set; if we had to set it, skip straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link; BMSR is read twice because the
	 * link bit is latched-low.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero AUX status word, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable, sane-looking BMCR value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link counts only if the PHY matches
			 * the requested speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* With an autonegotiated full-duplex link, resolve flow control
	 * from the local and link-partner pause advertisements.
	 */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Restart the link bring-up sequence, then re-check
		 * whether link came up.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode: MII for 10/100, GMII otherwise. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on a PCI-X or high-speed PCI bus: clear the
	 * status-change bits and write a magic value to the firmware
	 * mailbox in NIC SRAM.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier state transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2001
/* Software state for the driver's fiber (1000BASE-X) autonegotiation
 * state machine, tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control/result flags below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
/* MR_LP_ADV_*: abilities decoded from the link partner's config word. */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps measured in state-machine ticks (cur_time is
	 * incremented once per tg3_fiber_aneg_smachine() call).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has been seen; ability_match is set once it repeats.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Transmitted / received autoneg config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to wait for a condition to settle before advancing state. */
#define ANEG_STATE_SETTLE_TIME  10000
2065
/* One step of the fiber autonegotiation state machine.  Called
 * repeatedly from fiber_autoneg(): samples the received config word
 * from the MAC, updates the match-tracking state in *ap, advances
 * ap->state, and returns ANEG_OK, ANEG_TIMER_ENAB (caller should keep
 * ticking), ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First invocation: clear all match-tracking state. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match is set once the same config word has been
		 * received more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: the link is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Reset tracking state and restart negotiation. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Begin transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Let the zero config word settle before advertising. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus pause (ANEG_CFG_PS1). */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero partner ability word. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Add the ACK bit to the config word we transmit. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acknowledged; verify it acked the same
			 * ability word we matched, else start over.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject config words with invalid bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability word into MR_LP_ADV_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented
				 * (see the WAIT states below); only
				 * proceed if neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2313
/* Run the software 802.3z autonegotiation state machine to completion.
 *
 * Clears the MAC autoneg advertisement register, forces GMII port mode,
 * starts transmitting config code words, then clocks
 * tg3_fiber_aneg_smachine() roughly once per microsecond (bounded at
 * ~195ms) until it reports ANEG_DONE or ANEG_FAILED.  SEND_CONFIGS is
 * turned back off before returning.
 *
 * @tp:    device state
 * @flags: out - MR_* flag word accumulated by the state machine
 *
 * Returns 1 when the state machine finished (ANEG_DONE) and any of
 * MR_AN_COMPLETE / MR_LINK_OK / MR_LP_ADV_FULL_DUPLEX is set in the
 * resulting flags, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously programmed autoneg advertisement. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode while negotiating. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Begin sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* ~195ms worth of 1us polls of the state machine. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config code words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2357
/* Initialization sequence for the BCM8002 SERDES PHY.
 *
 * Register numbers 0x10-0x18 and their values are vendor-specific
 * magic from Broadcom's init sequence.  The sequence is skipped when
 * the device has already completed init and currently has no PCS sync
 * (see the condition below).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2407
/* Link setup for SERDES ports using the SG_DIG hardware autoneg block.
 *
 * With autoneg disabled, the SG_DIG engine is shut off and link is
 * declared up as soon as PCS sync is present.  With autoneg enabled,
 * the expected SG_DIG_CTRL value (advertising pause + asym pause) is
 * programmed if it differs from the current one, otherwise the SG_DIG
 * status is polled (~200ms) for completion.  Flow control is derived
 * from the partner's pause bits, with a parallel-detection fallback
 * when autoneg does not complete.
 *
 * Returns 1 if link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All chips except 5704 A0/A1 get the MAC_SERDES_CFG workaround
	 * writes below; DUAL_MAC_CTRL_ID tells which port we are.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if bit 31 (hardware autoneg enable,
		 * presumably) is still set, turn the SG_DIG engine off,
		 * applying the SERDES_CFG workaround first.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Magic per-port values from Broadcom. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)program hardware autoneg: pulse bit 30 before
		 * latching the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		/* Bit 1 set + PCS sync: autoneg completed.  Derive flow
		 * control from the partner's pause bits (19: symmetric,
		 * 20: asymmetric).
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  First pass after a
			 * (re)init just clears JUST_INITTED; on later
			 * passes disable the engine and fall back to
			 * parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2532
/* Link setup for SERDES ports without the SG_DIG hardware autoneg
 * block.  With autoneg enabled, runs the software state machine via
 * fiber_autoneg() and derives flow control from the partner's pause
 * flags; a link is also accepted by parallel detection (PCS sync
 * present with no config code words received).  With autoneg off, a
 * 1000FD link is simply forced.
 *
 * Returns 1 if link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync at all: no link, and flow control is stale. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change bits until they stay clear
		 * (presumably write-one-to-clear), up to ~1.8ms.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: PCS sync, no config code words. */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2590
/* (Re)establish link on a SERDES/TBI fiber port.
 *
 * Fast path: with software autoneg, carrier already up and a clean
 * PCS_SYNCED+SIGNAL_DET status, just ack the change bits and return.
 * Otherwise: select TBI port mode, init a BCM8002 PHY if present, run
 * hardware (SG_DIG) or software autoneg, then update carrier state,
 * LEDs and link_config, reporting any link change.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot pause/speed/duplex so only real changes are reported. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		/* Link is stable (sync + signal, no pending config):
		 * nothing to renegotiate.
		 */
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Select TBI port mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Ack any pending link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Clear SYNC/CFG changed bits until they stay clear (<= ~500us). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	/* If sync was lost, pulse SEND_CONFIGS to restart negotiation. */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber link is always reported as 1000FD when up; drive LEDs. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier transitions, or pause/speed/duplex changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2704
/* Link setup for SERDES ports driven through an MII-style PHY
 * (the 5714 special-casing below suggests that family).  Three flows:
 *   - link already held by parallel detection: leave the PHY alone
 *     and just sample link status at the end;
 *   - autoneg: rewrite the 1000X advertisement and restart autoneg
 *     if anything changed;
 *   - forced: force a link-down cycle, then program the new BMCR.
 * Speed/duplex and flow control are then derived from BMSR/BMCR and
 * the resolved advertisement, and carrier state is updated.
 *
 * Returns the OR of error results from the PHY accesses.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for fresh state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714: override with the MAC's view of link state. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Restart autoneg if the advertisement changed or autoneg
		 * was off; the result is picked up on a later poll.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low link status: read BMSR twice. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		/* With autoneg, resolve duplex and flow control from the
		 * intersection of local and partner advertisements.
		 */
		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2871
/* Periodic helper for SERDES MII ports: bring the link up by parallel
 * detection when autoneg gets no response (signal detect present but
 * no config code words), and drop back to autoneg when config code
 * words reappear on a parallel-detected link.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000FD with autoneg off. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2929
2930 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2931 {
2932         int err;
2933
2934         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2935                 err = tg3_setup_fiber_phy(tp, force_reset);
2936         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2937                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2938         } else {
2939                 err = tg3_setup_copper_phy(tp, force_reset);
2940         }
2941
2942         if (tp->link_config.active_speed == SPEED_1000 &&
2943             tp->link_config.active_duplex == DUPLEX_HALF)
2944                 tw32(MAC_TX_LENGTHS,
2945                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2946                       (6 << TX_LENGTHS_IPG_SHIFT) |
2947                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2948         else
2949                 tw32(MAC_TX_LENGTHS,
2950                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2951                       (6 << TX_LENGTHS_IPG_SHIFT) |
2952                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2953
2954         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2955                 if (netif_carrier_ok(tp->dev)) {
2956                         tw32(HOSTCC_STAT_COAL_TICKS,
2957                              tp->coal.stats_block_coalesce_usecs);
2958                 } else {
2959                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2960                 }
2961         }
2962
2963         return err;
2964 }
2965
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already in effect (flag set or
	 * indirect mailbox writes in use), this path should never be
	 * reached - something else is broken.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; the workqueue performs the reset. */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
2986
2987 static inline u32 tg3_tx_avail(struct tg3 *tp)
2988 {
2989         smp_mb();
2990         return (tp->tx_pending -
2991                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2992 }
2993
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Reap completed tx descriptors from our consumer index up to
	 * the hardware's reported consumer index.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A "completed" slot with no skb means the completion
		 * index is bogus - kick off recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear portion of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment.  Frag slots must have a NULL skb
		 * and must not cross hw_idx; either violation means the
		 * completion index is corrupt (handled below).
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough space has freed
	 * up; re-check under the tx lock to avoid racing the xmit path.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3061
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Select the producer ring named by opaque_key and resolve the
	 * masked destination index, descriptor, ring_info slot and
	 * buffer size.  src_idx >= 0 names a slot whose skb pointer is
	 * cleared once the new buffer has been committed.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	/* rx_offset aligns the payload; it is 2 except on some 5701
	 * configurations (see the note in tg3_rx()).
	 */
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	/* Commit: record the new skb/mapping, then release the source
	 * slot (if any) that this buffer replaces.
	 */
	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words are written; all other descriptor
	 * fields are invariant (see the comment above).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3133
3134 /* We only need to move over in the address because the other
3135  * members of the RX descriptor are invariant.  See notes above
3136  * tg3_alloc_rx_skb for full details.
3137  */
3138 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3139                            int src_idx, u32 dest_idx_unmasked)
3140 {
3141         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3142         struct ring_info *src_map, *dest_map;
3143         int dest_idx;
3144
3145         switch (opaque_key) {
3146         case RXD_OPAQUE_RING_STD:
3147                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3148                 dest_desc = &tp->rx_std[dest_idx];
3149                 dest_map = &tp->rx_std_buffers[dest_idx];
3150                 src_desc = &tp->rx_std[src_idx];
3151                 src_map = &tp->rx_std_buffers[src_idx];
3152                 break;
3153
3154         case RXD_OPAQUE_RING_JUMBO:
3155                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3156                 dest_desc = &tp->rx_jumbo[dest_idx];
3157                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3158                 src_desc = &tp->rx_jumbo[src_idx];
3159                 src_map = &tp->rx_jumbo_buffers[src_idx];
3160                 break;
3161
3162         default:
3163                 return;
3164         };
3165
3166         dest_map->skb = src_map->skb;
3167         pci_unmap_addr_set(dest_map, mapping,
3168                            pci_unmap_addr(src_map, mapping));
3169         dest_desc->addr_hi = src_desc->addr_hi;
3170         dest_desc->addr_lo = src_desc->addr_lo;
3171
3172         src_map->skb = NULL;
3173 }
3174
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel receive path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	int rc;

	rc = vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
	return rc;
}
#endif
3181
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;	/* our consumer index into the status ring */
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;		/* which producer rings we consumed buffers from */
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring and
		 * which slot within it this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Re-post the same buffer instead of allocating
			 * a fresh one.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: flip the DMA buffer to the stack
			 * and post a replacement.  On failure the ring is
			 * untouched, so we can simply recycle and drop.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a fresh skb and keep
			 * the original DMA buffer posted.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip reports
		 * a full 0xffff TCP/UDP checksum.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically kick the std producer mailbox during long
		 * polls so the chip does not starve for buffers.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Make sure the mailbox writes are posted to the chip. */
	mmiowb();

	return received;
}
3361
/* NAPI poll callback: handle link-change events, reap completed TX
 * descriptors, receive up to *budget packets, then re-enable chip
 * interrupts once no work remains.  Returns 0 when done (device is
 * removed from the poll list), 1 to stay scheduled.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() flags a recovery when the ring looks corrupt;
		 * bail out of NAPI and let the reset task rebuild things.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* With tagged status, record the tag we have now consumed;
	 * otherwise clear UPDATED so a new interrupt is recognized.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		/* Order the tag read against the work check below. */
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3423
/* Begin an interrupt-quiesce window: mark irq_sync so the IRQ
 * handlers stop scheduling NAPI, then wait for any handler already
 * running on another CPU to finish.  The caller is responsible for
 * clearing irq_sync again when normal operation may resume.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Nested quiesce would be a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible before we wait; handlers
	 * test it via tg3_irq_sync().
	 */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3433
/* Nonzero while interrupts are quiesced (see tg3_irq_quiesce());
 * IRQ handlers use this to avoid scheduling NAPI during that window.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3438
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	/* _bh variant: TX completion and NAPI run in softirq context. */
	spin_lock_bh(&tp->lock);
}
3450
/* Release the lock taken by tg3_full_lock().  Note this does not undo
 * tg3_irq_quiesce(); irq_sync is reset separately by the caller.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3455
3456 /* One-shot MSI handler - Chip automatically disables interrupt
3457  * after sending MSI so driver doesn't have to do it.
3458  */
3459 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3460 {
3461         struct net_device *dev = dev_id;
3462         struct tg3 *tp = netdev_priv(dev);
3463
3464         prefetch(tp->hw_status);
3465         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3466
3467         if (likely(!tg3_irq_sync(tp)))
3468                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3469
3470         return IRQ_HANDLED;
3471 }
3472
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will read first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3497
/* Legacy INTx interrupt handler (non-tagged status).  Shared-IRQ
 * aware: reports the interrupt as not handled when neither the status
 * block nor the PCI state register says it is ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Device is being quiesced; leave irqs masked and do not
		 * schedule NAPI.
		 */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3540
/* Legacy INTx interrupt handler for chips using tagged status: a new
 * status_tag (rather than the UPDATED bit) indicates fresh work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Device is being quiesced; do not schedule NAPI. */
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3582
3583 /* ISR for interrupt test */
3584 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3585                 struct pt_regs *regs)
3586 {
3587         struct net_device *dev = dev_id;
3588         struct tg3 *tp = netdev_priv(dev);
3589         struct tg3_hw_status *sblk = tp->hw_status;
3590
3591         if ((sblk->status & SD_STATUS_UPDATED) ||
3592             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3593                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3594                              0x00000001);
3595                 return IRQ_RETVAL(1);
3596         }
3597         return IRQ_RETVAL(0);
3598 }
3599
3600 static int tg3_init_hw(struct tg3 *, int);
3601 static int tg3_halt(struct tg3 *, int, int);
3602
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the chip is halted and the device closed; tp->lock is
 * dropped and re-acquired around that teardown, so callers must not
 * rely on state cached across this call when an error is returned.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop the lock across the sleeping teardown calls
		 * (del_timer_sync, dev_close), then re-take it so the
		 * caller's lock-held invariant still holds on return.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;	/* end any quiesce window */
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3624
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole entry point: invoke the interrupt handler
 * directly so the device can be serviced with IRQs unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3633
/* Process-context worker that halts and re-initializes the chip
 * (scheduled from tg3_tx_timeout() and the TX-recovery path in
 * tg3_poll()).
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Device was closed between scheduling and execution:
	 * nothing to do.
	 */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop NAPI/TX activity outside the lock, then re-take the
	 * lock with full IRQ quiescing for the actual reset.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* After a TX-ring recovery, fall back to the conservative
	 * (flushing) mailbox write methods before rebuilding.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3678
/* net_device watchdog callback: the TX queue has stalled.  Defer the
 * full chip reset to process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3688
3689 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3690 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3691 {
3692         u32 base = (u32) mapping & 0xffffffff;
3693
3694         return ((base > 0xffffdcc0) &&
3695                 (base + len + 8 < base));
3696 }
3697
3698 /* Test for DMA addresses > 40-bit */
3699 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3700                                           int len)
3701 {
3702 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3703         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3704                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3705         return 0;
3706 #else
3707         return 0;
3708 #endif
3709 }
3710
3711 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3712
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the offending skb into a fresh copy, map the copy and
 * post it as a single TX descriptor, then unmap and release every
 * ring slot the original (possibly fragmented) skb occupied between
 * *start and last_plus_one.  Returns 0 on success; -1 if allocation
 * fails or the copy itself straddles a 4GB boundary — in either
 * failure case the original slots are still cleaned up and the
 * packet is dropped.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 held the linear head; subsequent slots held
		 * page fragments.
		 */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		/* NOTE(review): fragment slots were presumably mapped
		 * with pci_map_page() by the xmit path but are unmapped
		 * here with pci_unmap_single() — equivalent on most
		 * platforms, but worth confirming.
		 */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		/* The first slot takes ownership of the copy (NULL if
		 * the workaround failed); all others are emptied.
		 */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* The ring no longer references the original skb. */
	dev_kfree_skb(skb);

	return ret;
}
3770
3771 static void tg3_set_txd(struct tg3 *tp, int entry,
3772                         dma_addr_t mapping, int len, u32 flags,
3773                         u32 mss_and_is_end)
3774 {
3775         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3776         int is_end = (mss_and_is_end & 0x1);
3777         u32 mss = (mss_and_is_end >> 1);
3778         u32 vlan_tag = 0;
3779
3780         if (is_end)
3781                 flags |= TXD_FLAG_END;
3782         if (flags & TXD_FLAG_VLAN) {
3783                 vlan_tag = flags >> 16;
3784                 flags &= 0xffff;
3785         }
3786         vlan_tag |= (mss << TXD_MSS_SHIFT);
3787
3788         txd->addr_hi = ((u64) mapping >> 32);
3789         txd->addr_lo = ((u64) mapping & 0xffffffff);
3790         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3791         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3792 }
3793
3794 /* hard_start_xmit for devices that don't have any bugs and
3795  * support TG3_FLG2_HW_TSO_2 only.
3796  */
3797 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3798 {
3799         struct tg3 *tp = netdev_priv(dev);
3800         dma_addr_t mapping;
3801         u32 len, entry, base_flags, mss;
3802
3803         len = skb_headlen(skb);
3804
3805         /* We are running in BH disabled context with netif_tx_lock
3806          * and TX reclaim runs via tp->poll inside of a software
3807          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3808          * no IRQ context deadlocks to worry about either.  Rejoice!
3809          */
3810         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3811                 if (!netif_queue_stopped(dev)) {
3812                         netif_stop_queue(dev);
3813
3814                         /* This is a hard error, log it. */
3815                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3816                                "queue awake!\n", dev->name);
3817                 }
3818                 return NETDEV_TX_BUSY;
3819         }
3820
3821         entry = tp->tx_prod;
3822         base_flags = 0;
3823 #if TG3_TSO_SUPPORT != 0
3824         mss = 0;
3825         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3826             (mss = skb_shinfo(skb)->gso_size) != 0) {
3827                 int tcp_opt_len, ip_tcp_len;
3828
3829                 if (skb_header_cloned(skb) &&
3830                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3831                         dev_kfree_skb(skb);
3832                         goto out_unlock;
3833                 }
3834
3835                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3836                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3837                 else {
3838                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3839                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3840                                      sizeof(struct tcphdr);
3841
3842                         skb->nh.iph->check = 0;
3843                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3844                                                      tcp_opt_len);
3845                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3846                 }
3847
3848                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3849                                TXD_FLAG_CPU_POST_DMA);
3850
3851                 skb->h.th->check = 0;
3852
3853         }
3854         else if (skb->ip_summed == CHECKSUM_HW)
3855                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3856 #else
3857         mss = 0;
3858         if (skb->ip_summed == CHECKSUM_HW)
3859                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3860 #endif
3861 #if TG3_VLAN_TAG_USED
3862         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3863                 base_flags |= (TXD_FLAG_VLAN |
3864                                (vlan_tx_tag_get(skb) << 16));
3865 #endif
3866
3867         /* Queue skb data, a.k.a. the main skb fragment. */
3868         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3869
3870         tp->tx_buffers[entry].skb = skb;
3871         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3872
3873         tg3_set_txd(tp, entry, mapping, len, base_flags,
3874                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3875
3876         entry = NEXT_TX(entry);
3877
3878         /* Now loop through additional data fragments, and queue them. */
3879         if (skb_shinfo(skb)->nr_frags > 0) {
3880                 unsigned int i, last;
3881
3882                 last = skb_shinfo(skb)->nr_frags - 1;
3883                 for (i = 0; i <= last; i++) {
3884                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3885
3886                         len = frag->size;
3887                         mapping = pci_map_page(tp->pdev,
3888                                                frag->page,
3889                                                frag->page_offset,
3890                                                len, PCI_DMA_TODEVICE);
3891
3892                         tp->tx_buffers[entry].skb = NULL;
3893                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3894
3895                         tg3_set_txd(tp, entry, mapping, len,
3896                                     base_flags, (i == last) | (mss << 1));
3897
3898                         entry = NEXT_TX(entry);
3899                 }
3900         }
3901
3902         /* Packets are ready, update Tx producer idx local and on card. */
3903         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3904
3905         tp->tx_prod = entry;
3906         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3907                 netif_stop_queue(dev);
3908                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3909                         netif_wake_queue(tp->dev);
3910         }
3911
3912 out_unlock:
3913         mmiowb();
3914
3915         dev->trans_start = jiffies;
3916
3917         return NETDEV_TX_OK;
3918 }
3919
3920 #if TG3_TSO_SUPPORT != 0
3921 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3922
3923 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3924  * TSO header is greater than 80 bytes.
3925  */
3926 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3927 {
3928         struct sk_buff *segs, *nskb;
3929
3930         /* Estimate the number of fragments in the worst case */
3931         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3932                 netif_stop_queue(tp->dev);
3933                 return NETDEV_TX_BUSY;
3934         }
3935
3936         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3937         if (unlikely(IS_ERR(segs)))
3938                 goto tg3_tso_bug_end;
3939
3940         do {
3941                 nskb = segs;
3942                 segs = segs->next;
3943                 nskb->next = NULL;
3944                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3945         } while (segs);
3946
3947 tg3_tso_bug_end:
3948         dev_kfree_skb(skb);
3949
3950         return NETDEV_TX_OK;
3951 }
3952 #endif
3953
3954 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3955  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3956  */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;	/* set when any mapping trips the 4G/40-bit DMA errata */

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	/* mss doubles as the TSO enable: non-zero gso_size means this skb
	 * must be segmented by the hardware.  Header-length info is packed
	 * into its upper bits below, as the descriptor format expects.
	 */
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* The IP/TCP headers are rewritten in place below, so a
		 * cloned header must be privatized first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		/* Chips with the HW_TSO_1 bug mishandle TSO headers larger
		 * than 80 bytes; divert those frames to the GSO workaround.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Pre-cook the IP header for the chip: zero checksum,
		 * per-segment total length.
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO computes the TCP checksum itself. */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO needs the pseudo-header checksum
			 * seeded into the TCP header.
			 */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP option lengths where this chip generation
		 * expects them: in mss bits for HW TSO and 5705, in
		 * base_flags for the others.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first descriptor owns the skb pointer;
			 * reclaim uses it to find the whole packet.
			 */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the packet's first descriptor so the
		 * workaround can re-copy/remap the whole chain.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, but re-wake if reclaim
		 * already made enough room (avoids a lost wakeup race).
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4134
4135 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4136                                int new_mtu)
4137 {
4138         dev->mtu = new_mtu;
4139
4140         if (new_mtu > ETH_DATA_LEN) {
4141                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4142                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4143                         ethtool_op_set_tso(dev, 0);
4144                 }
4145                 else
4146                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4147         } else {
4148                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4149                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4150                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4151         }
4152 }
4153
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Reject MTUs outside the range this chip supports. */
	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Device is live: quiesce the interface, then halt and
	 * reprogram the chip under the full lock.  The ordering of
	 * these calls is deliberate - do not rearrange.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* Only restart the interface if the hardware came back up. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4187
4188 /* Free up pending packets in all rx/tx rings.
4189  *
4190  * The chip has been shut down and the driver detached from
4191  * the networking, so no interrupts or new tx packets will
4192  * end up in the driver.  tp->{tx,}lock is not held and we are not
4193  * in an interrupt context and thus may sleep.
4194  */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Unmap and free every posted standard-ring RX buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Same for the jumbo RX ring. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Walk the TX ring.  A packet occupies one slot for its linear
	 * part plus one per page fragment, so the index is advanced
	 * manually as each piece is unmapped.  Only the first slot of a
	 * packet carries the skb pointer.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear portion first... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* ...then each fragment; the mask handles slots that
		 * wrap around the end of the ring.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4259
4260 /* Initialize tx/rx rings for packet processing.
4261  *
4262  * The chip has been shut down and the driver detached from
4263  * the networking, so no interrupts or new tx packets will
4264  * end up in the driver.  tp->{tx,}lock are held and thus
4265  * we may not sleep.
4266  */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers on the standard ring
	 * when the MTU exceeds the standard Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque encodes ring id + index so completions can be
		 * matched back to the posted buffer.
		 */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			/* Partial allocation is tolerated: run with a
			 * smaller ring unless nothing was allocated.
			 */
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Release the std-ring SKBs posted
					 * above before failing outright.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4349
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* The jumbo-RX and TX shadow arrays live inside this single
	 * allocation (see tg3_alloc_consistent), so one kfree releases
	 * all three.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	/* Each DMA region is freed and its pointer cleared, which makes
	 * this safe to call on a partially-allocated state (the alloc
	 * error path relies on that).
	 */
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4389
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
4394 static int tg3_alloc_consistent(struct tg3 *tp)
4395 {
4396         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4397                                       (TG3_RX_RING_SIZE +
4398                                        TG3_RX_JUMBO_RING_SIZE)) +
4399                                      (sizeof(struct tx_ring_info) *
4400                                       TG3_TX_RING_SIZE),
4401                                      GFP_KERNEL);
4402         if (!tp->rx_std_buffers)
4403                 return -ENOMEM;
4404
4405         memset(tp->rx_std_buffers, 0,
4406                (sizeof(struct ring_info) *
4407                 (TG3_RX_RING_SIZE +
4408                  TG3_RX_JUMBO_RING_SIZE)) +
4409                (sizeof(struct tx_ring_info) *
4410                 TG3_TX_RING_SIZE));
4411
4412         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4413         tp->tx_buffers = (struct tx_ring_info *)
4414                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4415
4416         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4417                                           &tp->rx_std_mapping);
4418         if (!tp->rx_std)
4419                 goto err_out;
4420
4421         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4422                                             &tp->rx_jumbo_mapping);
4423
4424         if (!tp->rx_jumbo)
4425                 goto err_out;
4426
4427         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4428                                           &tp->rx_rcb_mapping);
4429         if (!tp->rx_rcb)
4430                 goto err_out;
4431
4432         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4433                                            &tp->tx_desc_mapping);
4434         if (!tp->tx_ring)
4435                 goto err_out;
4436
4437         tp->hw_status = pci_alloc_consistent(tp->pdev,
4438                                              TG3_HW_STATUS_SIZE,
4439                                              &tp->status_mapping);
4440         if (!tp->hw_status)
4441                 goto err_out;
4442
4443         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4444                                             sizeof(struct tg3_hw_stats),
4445                                             &tp->stats_mapping);
4446         if (!tp->hw_stats)
4447                 goto err_out;
4448
4449         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4450         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4451
4452         return 0;
4453
4454 err_out:
4455         tg3_free_consistent(tp);
4456         return -ENOMEM;
4457 }
4458
4459 #define MAX_WAIT_CNT 1000
4460
4461 /* To stop a block, clear the enable bit and poll till it
4462  * clears.  tp->lock is held.
4463  */
4464 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4465 {
4466         unsigned int i;
4467         u32 val;
4468
4469         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4470                 switch (ofs) {
4471                 case RCVLSC_MODE:
4472                 case DMAC_MODE:
4473                 case MBFREE_MODE:
4474                 case BUFMGR_MODE:
4475                 case MEMARB_MODE:
4476                         /* We can't enable/disable these bits of the
4477                          * 5705/5750, just say success.
4478                          */
4479                         return 0;
4480
4481                 default:
4482                         break;
4483                 };
4484         }
4485
4486         val = tr32(ofs);
4487         val &= ~enable_bit;
4488         tw32_f(ofs, val);
4489
4490         for (i = 0; i < MAX_WAIT_CNT; i++) {
4491                 udelay(100);
4492                 val = tr32(ofs);
4493                 if ((val & enable_bit) == 0)
4494                         break;
4495         }
4496
4497         if (i == MAX_WAIT_CNT && !silent) {
4498                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4499                        "ofs=%lx enable_bit=%x\n",
4500                        ofs, enable_bit);
4501                 return -ENODEV;
4502         }
4503
4504         return 0;
4505 }
4506
4507 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the RX processing blocks.  Errors are OR-ed so a
	 * single failing block still lets the remainder be stopped.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the TX and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and poll until the enable bit
	 * actually clears.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register: assert all bits, then release. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the status block and stats so nothing stale is seen
	 * after the chip is brought back up.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4569
4570 /* tp->lock is held. */
4571 static int tg3_nvram_lock(struct tg3 *tp)
4572 {
4573         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4574                 int i;
4575
4576                 if (tp->nvram_lock_cnt == 0) {
4577                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4578                         for (i = 0; i < 8000; i++) {
4579                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4580                                         break;
4581                                 udelay(20);
4582                         }
4583                         if (i == 8000) {
4584                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4585                                 return -ENODEV;
4586                         }
4587                 }
4588                 tp->nvram_lock_cnt++;
4589         }
4590         return 0;
4591 }
4592
4593 /* tp->lock is held. */
4594 static void tg3_nvram_unlock(struct tg3 *tp)
4595 {
4596         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4597                 if (tp->nvram_lock_cnt > 0)
4598                         tp->nvram_lock_cnt--;
4599                 if (tp->nvram_lock_cnt == 0)
4600                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4601         }
4602 }
4603
4604 /* tp->lock is held. */
4605 static void tg3_enable_nvram_access(struct tg3 *tp)
4606 {
4607         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4608             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4609                 u32 nvaccess = tr32(NVRAM_ACCESS);
4610
4611                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4612         }
4613 }
4614
4615 /* tp->lock is held. */
4616 static void tg3_disable_nvram_access(struct tg3 *tp)
4617 {
4618         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4619             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4620                 u32 nvaccess = tr32(NVRAM_ACCESS);
4621
4622                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4623         }
4624 }
4625
4626 /* tp->lock is held. */
4627 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4628 {
4629         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4630                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4631
4632         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4633                 switch (kind) {
4634                 case RESET_KIND_INIT:
4635                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4636                                       DRV_STATE_START);
4637                         break;
4638
4639                 case RESET_KIND_SHUTDOWN:
4640                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4641                                       DRV_STATE_UNLOAD);
4642                         break;
4643
4644                 case RESET_KIND_SUSPEND:
4645                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4646                                       DRV_STATE_SUSPEND);
4647                         break;
4648
4649                 default:
4650                         break;
4651                 };
4652         }
4653 }
4654
4655 /* tp->lock is held. */
4656 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4657 {
4658         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4659                 switch (kind) {
4660                 case RESET_KIND_INIT:
4661                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4662                                       DRV_STATE_START_DONE);
4663                         break;
4664
4665                 case RESET_KIND_SHUTDOWN:
4666                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4667                                       DRV_STATE_UNLOAD_DONE);
4668                         break;
4669
4670                 default:
4671                         break;
4672                 };
4673         }
4674 }
4675
4676 /* tp->lock is held. */
4677 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4678 {
4679         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4680                 switch (kind) {
4681                 case RESET_KIND_INIT:
4682                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4683                                       DRV_STATE_START);
4684                         break;
4685
4686                 case RESET_KIND_SHUTDOWN:
4687                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4688                                       DRV_STATE_UNLOAD);
4689                         break;
4690
4691                 case RESET_KIND_SUSPEND:
4692                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4693                                       DRV_STATE_SUSPEND);
4694                         break;
4695
4696                 default:
4697                         break;
4698                 };
4699         }
4700 }
4701
4702 static void tg3_stop_fw(struct tg3 *);
4703
/* Perform a full core-clock reset of the chip and bring the register
 * interface back to a usable state.  Handles bus-specific quirks
 * (PCI, PCI-X, PCI Express), restores the PCI config space clobbered
 * by the reset, waits for bootcode to signal completion, and finally
 * re-probes the ASF-enable state from NVRAM shadow memory.
 * Always returns 0 at present.  tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* 5752/5755/5787: clear the fastboot program counter so the
	 * reset executes the full bootcode path.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): register 0x7e2c and GRC_MISC_CFG bit 29
		 * are undocumented PCIe reset workarounds taken on faith
		 * from the vendor -- do not "clean up" these magics.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	/* Keep the GPHY powered across the reset on 5705+ parts. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 bit 15 looks
			 * like a link-retrain trigger for 5750 A0 --
			 * confirm against the chip errata.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		/* NOTE(review): 0x5000 <- 0x400 is an undocumented
		 * 5750 A3 workaround write, performed with firmware
		 * paused.
		 */
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): undocumented 5705 A0 workaround bit. */
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	/* Mini-PCI 5705 parts need CLKRUN output enabled (and, on A0,
	 * forced) after the reset.
	 */
	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode matching the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): undocumented PCIe post-reset bit. */
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4914
4915 /* tp->lock is held. */
4916 static void tg3_stop_fw(struct tg3 *tp)
4917 {
4918         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4919                 u32 val;
4920                 int i;
4921
4922                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4923                 val = tr32(GRC_RX_CPU_EVENT);
4924                 val |= (1 << 14);
4925                 tw32(GRC_RX_CPU_EVENT, val);
4926
4927                 /* Wait for RX cpu to ACK the event.  */
4928                 for (i = 0; i < 100; i++) {
4929                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4930                                 break;
4931                         udelay(1);
4932                 }
4933         }
4934 }
4935
/* Stop the chip: quiesce ASF firmware, signal the impending reset of
 * @kind, abort all hardware activity (silently if @silent), perform a
 * full chip reset, then post the legacy and new-handshake completion
 * signatures.  Returns 0 on success or the tg3_chip_reset() error.
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* Collapsed redundant "if (err) return err; return 0;". */
	return err;
}
4956
4957 #define TG3_FW_RELEASE_MAJOR    0x0
4958 #define TG3_FW_RELASE_MINOR     0x0
4959 #define TG3_FW_RELEASE_FIX      0x0
4960 #define TG3_FW_START_ADDR       0x08000000
4961 #define TG3_FW_TEXT_ADDR        0x08000000
4962 #define TG3_FW_TEXT_LEN         0x9c0
4963 #define TG3_FW_RODATA_ADDR      0x080009c0
4964 #define TG3_FW_RODATA_LEN       0x60
4965 #define TG3_FW_DATA_ADDR        0x08000a40
4966 #define TG3_FW_DATA_LEN         0x20
4967 #define TG3_FW_SBSS_ADDR        0x08000a60
4968 #define TG3_FW_SBSS_LEN         0xc
4969 #define TG3_FW_BSS_ADDR         0x08000a70
4970 #define TG3_FW_BSS_LEN          0x10
4971
/* Firmware text segment (MIPS machine code) for the 5701 A0 fix,
 * loaded into the CPU scratch area by tg3_load_5701_a0_firmware_fix()
 * via tg3_load_firmware_cpu().  Do not edit; generated from vendor
 * firmware (see the copyright/permission notice at the top of this
 * file).
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5065
/* Read-only data segment for the 5701 A0 fix firmware (companion to
 * tg3FwText); loaded by tg3_load_5701_a0_firmware_fix().  The words
 * appear to be packed ASCII message strings -- do not edit.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5073
5074 #if 0 /* All zeros, don't eat up space with it. */
5075 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5076         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5077         0x00000000, 0x00000000, 0x00000000, 0x00000000
5078 };
5079 #endif
5080
5081 #define RX_CPU_SCRATCH_BASE     0x30000
5082 #define RX_CPU_SCRATCH_SIZE     0x04000
5083 #define TX_CPU_SCRATCH_BASE     0x34000
5084 #define TX_CPU_SCRATCH_SIZE     0x04000
5085
5086 /* tp->lock is held. */
5087 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5088 {
5089         int i;
5090
5091         BUG_ON(offset == TX_CPU_BASE &&
5092             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5093
5094         if (offset == RX_CPU_BASE) {
5095                 for (i = 0; i < 10000; i++) {
5096                         tw32(offset + CPU_STATE, 0xffffffff);
5097                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5098                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5099                                 break;
5100                 }
5101
5102                 tw32(offset + CPU_STATE, 0xffffffff);
5103                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5104                 udelay(10);
5105         } else {
5106                 for (i = 0; i < 10000; i++) {
5107                         tw32(offset + CPU_STATE, 0xffffffff);
5108                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5109                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5110                                 break;
5111                 }
5112         }
5113
5114         if (i >= 10000) {
5115                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5116                        "and %s CPU\n",
5117                        tp->dev->name,
5118                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5119                 return -ENODEV;
5120         }
5121
5122         /* Clear firmware's nvram arbitration. */
5123         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5124                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5125         return 0;
5126 }
5127
/* Describes one firmware image to load into a CPU scratch area:
 * text, read-only data and initialized data sections.  The *_base
 * fields are link-time addresses; tg3_load_firmware_cpu() uses only
 * their low 16 bits as the offset within the scratch area.  A NULL
 * *_data pointer means that section is written as all zeros.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words (NULL => zeros) */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words (NULL => zeros) */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words (NULL => zeros) */
};
5139
5140 /* tp->lock is held. */
5141 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5142                                  int cpu_scratch_size, struct fw_info *info)
5143 {
5144         int err, lock_err, i;
5145         void (*write_op)(struct tg3 *, u32, u32);
5146
5147         if (cpu_base == TX_CPU_BASE &&
5148             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5149                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5150                        "TX cpu firmware on %s which is 5705.\n",
5151                        tp->dev->name);
5152                 return -EINVAL;
5153         }
5154
5155         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5156                 write_op = tg3_write_mem;
5157         else
5158                 write_op = tg3_write_indirect_reg32;
5159
5160         /* It is possible that bootcode is still loading at this point.
5161          * Get the nvram lock first before halting the cpu.
5162          */
5163         lock_err = tg3_nvram_lock(tp);
5164         err = tg3_halt_cpu(tp, cpu_base);
5165         if (!lock_err)
5166                 tg3_nvram_unlock(tp);
5167         if (err)
5168                 goto out;
5169
5170         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5171                 write_op(tp, cpu_scratch_base + i, 0);
5172         tw32(cpu_base + CPU_STATE, 0xffffffff);
5173         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5174         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5175                 write_op(tp, (cpu_scratch_base +
5176                               (info->text_base & 0xffff) +
5177                               (i * sizeof(u32))),
5178                          (info->text_data ?
5179                           info->text_data[i] : 0));
5180         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5181                 write_op(tp, (cpu_scratch_base +
5182                               (info->rodata_base & 0xffff) +
5183                               (i * sizeof(u32))),
5184                          (info->rodata_data ?
5185                           info->rodata_data[i] : 0));
5186         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5187                 write_op(tp, (cpu_scratch_base +
5188                               (info->data_base & 0xffff) +
5189                               (i * sizeof(u32))),
5190                          (info->data_data ?
5191                           info->data_data[i] : 0));
5192
5193         err = 0;
5194
5195 out:
5196         return err;
5197 }
5198
5199 /* tp->lock is held. */
5200 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5201 {
5202         struct fw_info info;
5203         int err, i;
5204
5205         info.text_base = TG3_FW_TEXT_ADDR;
5206         info.text_len = TG3_FW_TEXT_LEN;
5207         info.text_data = &tg3FwText[0];
5208         info.rodata_base = TG3_FW_RODATA_ADDR;
5209         info.rodata_len = TG3_FW_RODATA_LEN;
5210         info.rodata_data = &tg3FwRodata[0];
5211         info.data_base = TG3_FW_DATA_ADDR;
5212         info.data_len = TG3_FW_DATA_LEN;
5213         info.data_data = NULL;
5214
5215         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5216                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5217                                     &info);
5218         if (err)
5219                 return err;
5220
5221         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5222                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5223                                     &info);
5224         if (err)
5225                 return err;
5226
5227         /* Now startup only the RX cpu. */
5228         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5229         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5230
5231         for (i = 0; i < 5; i++) {
5232                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5233                         break;
5234                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5235                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5236                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5237                 udelay(1000);
5238         }
5239         if (i >= 5) {
5240                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5241                        "to set RX CPU PC, is %08x should be %08x\n",
5242                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5243                        TG3_FW_TEXT_ADDR);
5244                 return -ENODEV;
5245         }
5246         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5247         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5248
5249         return 0;
5250 }
5251
5252 #if TG3_TSO_SUPPORT != 0
5253
5254 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5255 #define TG3_TSO_FW_RELASE_MINOR         0x6
5256 #define TG3_TSO_FW_RELEASE_FIX          0x0
5257 #define TG3_TSO_FW_START_ADDR           0x08000000
5258 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5259 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5260 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5261 #define TG3_TSO_FW_RODATA_LEN           0x60
5262 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5263 #define TG3_TSO_FW_DATA_LEN             0x30
5264 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5265 #define TG3_TSO_FW_SBSS_LEN             0x2c
5266 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5267 #define TG3_TSO_FW_BSS_LEN              0x894
5268
5269 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5270         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5271         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5272         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5273         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5274         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5275         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5276         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5277         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5278         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5279         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5280         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5281         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5282         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5283         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5284         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5285         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5286         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5287         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5288         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5289         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5290         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5291         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5292         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5293         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5294         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5295         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5296         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5297         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5298         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5299         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5300         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5301         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5302         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5303         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5304         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5305         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5306         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5307         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5308         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5309         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5310         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5311         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5312         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5313         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5314         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5315         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5316         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5317         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5318         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5319         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5320         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5321         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5322         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5323         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5324         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5325         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5326         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5327         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5328         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5329         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5330         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5331         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5332         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5333         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5334         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5335         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5336         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5337         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5338         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5339         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5340         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5341         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5342         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5343         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5344         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5345         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5346         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5347         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5348         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5349         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5350         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5351         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5352         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5353         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5354         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5355         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5356         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5357         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5358         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5359         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5360         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5361         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5362         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5363         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5364         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5365         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5366         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5367         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5368         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5369         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5370         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5371         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5372         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5373         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5374         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5375         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5376         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5377         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5378         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5379         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5380         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5381         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5382         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5383         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5384         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5385         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5386         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5387         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5388         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5389         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5390         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5391         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5392         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5393         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5394         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5395         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5396         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5397         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5398         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5399         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5400         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5401         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5402         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5403         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5404         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5405         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5406         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5407         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5408         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5409         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5410         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5411         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5412         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5413         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5414         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5415         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5416         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5417         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5418         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5419         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5420         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5421         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5422         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5423         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5424         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5425         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5426         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5427         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5428         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5429         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5430         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5431         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5432         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5433         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5434         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5435         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5436         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5437         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5438         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5439         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5440         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5441         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5442         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5443         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5444         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5445         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5446         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5447         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5448         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5449         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5450         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5451         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5452         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5453         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5454         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5455         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5456         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5457         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5458         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5459         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5460         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5461         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5462         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5463         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5464         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5465         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5466         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5467         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5468         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5469         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5470         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5471         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5472         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5473         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5474         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5475         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5476         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5477         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5478         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5479         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5480         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5481         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5482         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5483         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5484         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5485         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5486         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5487         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5488         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5489         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5490         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5491         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5492         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5493         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5494         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5495         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5496         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5497         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5498         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5499         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5500         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5501         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5502         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5503         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5504         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5505         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5506         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5507         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5508         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5509         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5510         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5511         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5512         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5513         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5514         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5515         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5516         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5517         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5518         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5519         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5520         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5521         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5522         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5523         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5524         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5525         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5526         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5527         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5528         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5529         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5530         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5531         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5532         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5533         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5534         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5535         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5536         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5537         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5538         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5539         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5540         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5541         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5542         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5543         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5544         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5545         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5546         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5547         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5548         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5549         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5550         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5551         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5552         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5553         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5554 };
5555
/*
 * Read-only data segment of the TSO firmware image.  The words are raw
 * big-endian ASCII tag strings used by the firmware itself -- decoding
 * the hex gives "MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**",
 * "SwEvent0" and "fatalErr".  Generated from the firmware build; do not
 * edit by hand.  NOTE(review): presumably copied to the device at the
 * TSO firmware rodata address by the firmware loader -- confirm against
 * the tg3 firmware-load routine, which is outside this view.
 */
5556 static u32 tg3TsoFwRodata[] = {
5557         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5558         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5559         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5560         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5561         0x00000000,
5562 };
5563
/*
 * Initialized data segment of the TSO firmware image.  The non-zero
 * words decode to the ASCII version string "stkoffld_v1.6.0"
 * (stack-offload firmware v1.6.0); the remaining words are zero
 * padding.  Generated from the firmware build; do not edit by hand.
 */
5564 static u32 tg3TsoFwData[] = {
5565         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5566         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5567         0x00000000,
5568 };
5569
5570 /* 5705 needs a special version of the TSO firmware.  */
5571 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5572 #define TG3_TSO5_FW_RELASE_MINOR        0x2 /* sic: "RELASE" typo kept -- the
						    * misspelled name is referenced
						    * elsewhere in this file, so
						    * renaming it would break the
						    * build. */
5573 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5574 /*
5575  * Load-time memory layout of the 5705 TSO firmware image (device
5576  * addresses and section lengths, in bytes).  Text starts at 0x00010000;
5577  * note rodata (0x00010e90 + 0x50 = 0x00010ee0) is NOT contiguous with
5578  * data at 0x00010f00 -- there is a small alignment gap.  These values
5579  * must match the firmware build that produced the tg3Tso5Fw* arrays
5580  * below; do not change one without the other.
5581  */
5574 #define TG3_TSO5_FW_START_ADDR          0x00010000
5575 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5576 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5577 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5578 #define TG3_TSO5_FW_RODATA_LEN          0x50
5579 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5580 #define TG3_TSO5_FW_DATA_LEN            0x20
5581 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5582 #define TG3_TSO5_FW_SBSS_LEN            0x28
5583 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5584 #define TG3_TSO5_FW_BSS_LEN             0x88
5585
5586 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5587         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5588         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5589         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5590         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5591         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5592         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5593         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5594         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5595         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5596         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5597         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5598         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5599         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5600         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5601         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5602         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5603         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5604         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5605         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5606         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5607         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5608         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5609         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5610         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5611         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5612         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5613         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5614         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5615         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5616         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5617         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5618         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5619         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5620         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5621         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5622         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5623         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5624         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5625         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5626         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5627         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5628         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5629         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5630         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5631         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5632         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5633         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5634         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5635         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5636         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5637         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5638         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5639         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5640         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5641         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5642         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5643         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5644         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5645         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5646         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5647         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5648         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5649         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5650         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5651         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5652         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5653         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5654         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5655         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5656         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5657         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5658         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5659         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5660         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5661         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5662         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5663         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5664         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5665         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5666         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5667         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5668         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5669         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5670         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5671         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5672         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5673         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5674         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5675         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5676         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5677         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5678         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5679         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5680         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5681         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5682         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5683         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5684         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5685         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5686         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5687         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5688         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5689         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5690         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5691         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5692         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5693         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5694         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5695         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5696         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5697         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5698         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5699         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5700         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5701         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5702         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5703         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5704         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5705         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5706         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5707         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5708         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5709         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5710         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5711         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5712         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5713         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5714         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5715         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5716         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5717         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5718         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5719         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5720         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5721         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5722         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5723         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5724         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5725         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5726         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5727         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5728         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5729         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5730         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5731         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5732         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5733         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5734         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5735         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5736         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5737         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5738         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5739         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5740         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5741         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5742         0x00000000, 0x00000000, 0x00000000,
5743 };
5744
/* Read-only data segment of the 5705 TSO firmware image.  The words
 * are packed ASCII tags visible in the hex ("Main", "CpuB", "stkoffld",
 * "fatalErr", ...).  NOTE(review): the "+ 1" padding element matches
 * the other firmware arrays in this file — presumably guards against a
 * zero-length array; confirm against the loader's length handling.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5751
/* Initialized data segment of the 5705 TSO firmware image; the hex
 * decodes to the ASCII version tag "stkoffld_v1.2.0".
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5756
5757 /* tp->lock is held. */
5758 static int tg3_load_tso_firmware(struct tg3 *tp)
5759 {
5760         struct fw_info info;
5761         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5762         int err, i;
5763
5764         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5765                 return 0;
5766
5767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5768                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5769                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5770                 info.text_data = &tg3Tso5FwText[0];
5771                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5772                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5773                 info.rodata_data = &tg3Tso5FwRodata[0];
5774                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5775                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5776                 info.data_data = &tg3Tso5FwData[0];
5777                 cpu_base = RX_CPU_BASE;
5778                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5779                 cpu_scratch_size = (info.text_len +
5780                                     info.rodata_len +
5781                                     info.data_len +
5782                                     TG3_TSO5_FW_SBSS_LEN +
5783                                     TG3_TSO5_FW_BSS_LEN);
5784         } else {
5785                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5786                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5787                 info.text_data = &tg3TsoFwText[0];
5788                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5789                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5790                 info.rodata_data = &tg3TsoFwRodata[0];
5791                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5792                 info.data_len = TG3_TSO_FW_DATA_LEN;
5793                 info.data_data = &tg3TsoFwData[0];
5794                 cpu_base = TX_CPU_BASE;
5795                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5796                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5797         }
5798
5799         err = tg3_load_firmware_cpu(tp, cpu_base,
5800                                     cpu_scratch_base, cpu_scratch_size,
5801                                     &info);
5802         if (err)
5803                 return err;
5804
5805         /* Now startup the cpu. */
5806         tw32(cpu_base + CPU_STATE, 0xffffffff);
5807         tw32_f(cpu_base + CPU_PC,    info.text_base);
5808
5809         for (i = 0; i < 5; i++) {
5810                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5811                         break;
5812                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5813                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5814                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5815                 udelay(1000);
5816         }
5817         if (i >= 5) {
5818                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5819                        "to set CPU PC, is %08x should be %08x\n",
5820                        tp->dev->name, tr32(cpu_base + CPU_PC),
5821                        info.text_base);
5822                 return -ENODEV;
5823         }
5824         tw32(cpu_base + CPU_STATE, 0xffffffff);
5825         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5826         return 0;
5827 }
5828
5829 #endif /* TG3_TSO_SUPPORT != 0 */
5830
5831 /* tp->lock is held. */
5832 static void __tg3_set_mac_addr(struct tg3 *tp)
5833 {
5834         u32 addr_high, addr_low;
5835         int i;
5836
5837         addr_high = ((tp->dev->dev_addr[0] << 8) |
5838                      tp->dev->dev_addr[1]);
5839         addr_low = ((tp->dev->dev_addr[2] << 24) |
5840                     (tp->dev->dev_addr[3] << 16) |
5841                     (tp->dev->dev_addr[4] <<  8) |
5842                     (tp->dev->dev_addr[5] <<  0));
5843         for (i = 0; i < 4; i++) {
5844                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5845                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5846         }
5847
5848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5850                 for (i = 0; i < 12; i++) {
5851                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5852                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5853                 }
5854         }
5855
5856         addr_high = (tp->dev->dev_addr[0] +
5857                      tp->dev->dev_addr[1] +
5858                      tp->dev->dev_addr[2] +
5859                      tp->dev->dev_addr[3] +
5860                      tp->dev->dev_addr[4] +
5861                      tp->dev->dev_addr[5]) &
5862                 TX_BACKOFF_SEED_MASK;
5863         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5864 }
5865
5866 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5867 {
5868         struct tg3 *tp = netdev_priv(dev);
5869         struct sockaddr *addr = p;
5870         int err = 0;
5871
5872         if (!is_valid_ether_addr(addr->sa_data))
5873                 return -EINVAL;
5874
5875         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5876
5877         if (!netif_running(dev))
5878                 return 0;
5879
5880         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5881                 /* Reset chip so that ASF can re-init any MAC addresses it
5882                  * needs.
5883                  */
5884                 tg3_netif_stop(tp);
5885                 tg3_full_lock(tp, 1);
5886
5887                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5888                 err = tg3_restart_hw(tp, 0);
5889                 if (!err)
5890                         tg3_netif_start(tp);
5891                 tg3_full_unlock(tp);
5892         } else {
5893                 spin_lock_bh(&tp->lock);
5894                 __tg3_set_mac_addr(tp);
5895                 spin_unlock_bh(&tp->lock);
5896         }
5897
5898         return err;
5899 }
5900
5901 /* tp->lock is held. */
5902 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5903                            dma_addr_t mapping, u32 maxlen_flags,
5904                            u32 nic_addr)
5905 {
5906         tg3_write_mem(tp,
5907                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5908                       ((u64) mapping >> 32));
5909         tg3_write_mem(tp,
5910                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5911                       ((u64) mapping & 0xffffffff));
5912         tg3_write_mem(tp,
5913                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5914                        maxlen_flags);
5915
5916         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5917                 tg3_write_mem(tp,
5918                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5919                               nic_addr);
5920 }
5921
5922 static void __tg3_set_rx_mode(struct net_device *);
5923 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5924 {
5925         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5926         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5927         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5928         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5929         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5930                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5931                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5932         }
5933         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5934         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5935         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5936                 u32 val = ec->stats_block_coalesce_usecs;
5937
5938                 if (!netif_carrier_ok(tp->dev))
5939                         val = 0;
5940
5941                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5942         }
5943 }
5944
5945 /* tp->lock is held. */
5946 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5947 {
5948         u32 val, rdmac_mode;
5949         int i, err, limit;
5950
5951         tg3_disable_ints(tp);
5952
5953         tg3_stop_fw(tp);
5954
5955         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5956
5957         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5958                 tg3_abort_hw(tp, 1);
5959         }
5960
5961         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5962                 tg3_phy_reset(tp);
5963
5964         err = tg3_chip_reset(tp);
5965         if (err)
5966                 return err;
5967
5968         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5969
5970         /* This works around an issue with Athlon chipsets on
5971          * B3 tigon3 silicon.  This bit has no effect on any
5972          * other revision.  But do not set this on PCI Express
5973          * chips.
5974          */
5975         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5976                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5977         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5978
5979         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5980             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5981                 val = tr32(TG3PCI_PCISTATE);
5982                 val |= PCISTATE_RETRY_SAME_DMA;
5983                 tw32(TG3PCI_PCISTATE, val);
5984         }
5985
5986         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5987                 /* Enable some hw fixes.  */
5988                 val = tr32(TG3PCI_MSI_DATA);
5989                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5990                 tw32(TG3PCI_MSI_DATA, val);
5991         }
5992
5993         /* Descriptor ring init may make accesses to the
5994          * NIC SRAM area to setup the TX descriptors, so we
5995          * can only do this after the hardware has been
5996          * successfully reset.
5997          */
5998         err = tg3_init_rings(tp);
5999         if (err)
6000                 return err;
6001
6002         /* This value is determined during the probe time DMA
6003          * engine test, tg3_test_dma.
6004          */
6005         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6006
6007         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6008                           GRC_MODE_4X_NIC_SEND_RINGS |
6009                           GRC_MODE_NO_TX_PHDR_CSUM |
6010                           GRC_MODE_NO_RX_PHDR_CSUM);
6011         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6012
6013         /* Pseudo-header checksum is done by hardware logic and not
6014          * the offload processers, so make the chip do the pseudo-
6015          * header checksums on receive.  For transmit it is more
6016          * convenient to do the pseudo-header checksum in software
6017          * as Linux does that on transmit for us in all cases.
6018          */
6019         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6020
6021         tw32(GRC_MODE,
6022              tp->grc_mode |
6023              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6024
6025         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6026         val = tr32(GRC_MISC_CFG);
6027         val &= ~0xff;
6028         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6029         tw32(GRC_MISC_CFG, val);
6030
6031         /* Initialize MBUF/DESC pool. */
6032         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6033                 /* Do nothing.  */
6034         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6035                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6036                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6037                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6038                 else
6039                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6040                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6041                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6042         }
6043 #if TG3_TSO_SUPPORT != 0
6044         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6045                 int fw_len;
6046
6047                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6048                           TG3_TSO5_FW_RODATA_LEN +
6049                           TG3_TSO5_FW_DATA_LEN +
6050                           TG3_TSO5_FW_SBSS_LEN +
6051                           TG3_TSO5_FW_BSS_LEN);
6052                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6053                 tw32(BUFMGR_MB_POOL_ADDR,
6054                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6055                 tw32(BUFMGR_MB_POOL_SIZE,
6056                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6057         }
6058 #endif
6059
6060         if (tp->dev->mtu <= ETH_DATA_LEN) {
6061                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6062                      tp->bufmgr_config.mbuf_read_dma_low_water);
6063                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6064                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6065                 tw32(BUFMGR_MB_HIGH_WATER,
6066                      tp->bufmgr_config.mbuf_high_water);
6067         } else {
6068                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6069                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6070                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6071                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6072                 tw32(BUFMGR_MB_HIGH_WATER,
6073                      tp->bufmgr_config.mbuf_high_water_jumbo);
6074         }
6075         tw32(BUFMGR_DMA_LOW_WATER,
6076              tp->bufmgr_config.dma_low_water);
6077         tw32(BUFMGR_DMA_HIGH_WATER,
6078              tp->bufmgr_config.dma_high_water);
6079
6080         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6081         for (i = 0; i < 2000; i++) {
6082                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6083                         break;
6084                 udelay(10);
6085         }
6086         if (i >= 2000) {
6087                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6088                        tp->dev->name);
6089                 return -ENODEV;
6090         }
6091
6092         /* Setup replenish threshold. */
6093         val = tp->rx_pending / 8;
6094         if (val == 0)
6095                 val = 1;
6096         else if (val > tp->rx_std_max_post)
6097                 val = tp->rx_std_max_post;
6098
6099         tw32(RCVBDI_STD_THRESH, val);
6100
6101         /* Initialize TG3_BDINFO's at:
6102          *  RCVDBDI_STD_BD:     standard eth size rx ring
6103          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6104          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6105          *
6106          * like so:
6107          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6108          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6109          *                              ring attribute flags
6110          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6111          *
6112          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6113          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6114          *
6115          * The size of each ring is fixed in the firmware, but the location is
6116          * configurable.
6117          */
6118         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6119              ((u64) tp->rx_std_mapping >> 32));
6120         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6121              ((u64) tp->rx_std_mapping & 0xffffffff));
6122         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6123              NIC_SRAM_RX_BUFFER_DESC);
6124
6125         /* Don't even try to program the JUMBO/MINI buffer descriptor
6126          * configs on 5705.
6127          */
6128         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6129                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6130                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6131         } else {
6132                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6133                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6134
6135                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6136                      BDINFO_FLAGS_DISABLED);
6137
6138                 /* Setup replenish threshold. */
6139                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6140
6141                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6142                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6143                              ((u64) tp->rx_jumbo_mapping >> 32));
6144                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6145                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6146                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6147                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6148                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6149                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6150                 } else {
6151                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6152                              BDINFO_FLAGS_DISABLED);
6153                 }
6154
6155         }
6156
6157         /* There is only one send ring on 5705/5750, no need to explicitly
6158          * disable the others.
6159          */
6160         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6161                 /* Clear out send RCB ring in SRAM. */
6162                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6163                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6164                                       BDINFO_FLAGS_DISABLED);
6165         }
6166
6167         tp->tx_prod = 0;
6168         tp->tx_cons = 0;
6169         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6170         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6171
6172         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6173                        tp->tx_desc_mapping,
6174                        (TG3_TX_RING_SIZE <<
6175                         BDINFO_FLAGS_MAXLEN_SHIFT),
6176                        NIC_SRAM_TX_BUFFER_DESC);
6177
6178         /* There is only one receive return ring on 5705/5750, no need
6179          * to explicitly disable the others.
6180          */
6181         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6182                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6183                      i += TG3_BDINFO_SIZE) {
6184                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6185                                       BDINFO_FLAGS_DISABLED);
6186                 }
6187         }
6188
6189         tp->rx_rcb_ptr = 0;
6190         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6191
6192         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6193                        tp->rx_rcb_mapping,
6194                        (TG3_RX_RCB_RING_SIZE(tp) <<
6195                         BDINFO_FLAGS_MAXLEN_SHIFT),
6196                        0);
6197
6198         tp->rx_std_ptr = tp->rx_pending;
6199         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6200                      tp->rx_std_ptr);
6201
6202         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6203                                                 tp->rx_jumbo_pending : 0;
6204         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6205                      tp->rx_jumbo_ptr);
6206
6207         /* Initialize MAC address and backoff seed. */
6208         __tg3_set_mac_addr(tp);
6209
6210         /* MTU + ethernet header + FCS + optional VLAN tag */
6211         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6212
6213         /* The slot time is changed by tg3_setup_phy if we
6214          * run at gigabit with half duplex.
6215          */
6216         tw32(MAC_TX_LENGTHS,
6217              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6218              (6 << TX_LENGTHS_IPG_SHIFT) |
6219              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6220
6221         /* Receive rules. */
6222         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6223         tw32(RCVLPC_CONFIG, 0x0181);
6224
6225         /* Calculate RDMAC_MODE setting early, we need it to determine
6226          * the RCVLPC_STATE_ENABLE mask.
6227          */
6228         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6229                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6230                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6231                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6232                       RDMAC_MODE_LNGREAD_ENAB);
6233         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6234                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6235
6236         /* If statement applies to 5705 and 5750 PCI devices only */
6237         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6238              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6239             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6240                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6241                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6242                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6243                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6244                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6245                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6246                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6247                 }
6248         }
6249
6250         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6251                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6252
6253 #if TG3_TSO_SUPPORT != 0
6254         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6255                 rdmac_mode |= (1 << 27);
6256 #endif
6257
6258         /* Receive/send statistics. */
6259         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6260                 val = tr32(RCVLPC_STATS_ENABLE);
6261                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6262                 tw32(RCVLPC_STATS_ENABLE, val);
6263         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6264                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6265                 val = tr32(RCVLPC_STATS_ENABLE);
6266                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6267                 tw32(RCVLPC_STATS_ENABLE, val);
6268         } else {
6269                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6270         }
6271         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6272         tw32(SNDDATAI_STATSENAB, 0xffffff);
6273         tw32(SNDDATAI_STATSCTRL,
6274              (SNDDATAI_SCTRL_ENABLE |
6275               SNDDATAI_SCTRL_FASTUPD));
6276
6277         /* Setup host coalescing engine. */
6278         tw32(HOSTCC_MODE, 0);
6279         for (i = 0; i < 2000; i++) {
6280                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6281                         break;
6282                 udelay(10);
6283         }
6284
6285         __tg3_set_coalesce(tp, &tp->coal);
6286
6287         /* set status block DMA address */
6288         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6289              ((u64) tp->status_mapping >> 32));
6290         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6291              ((u64) tp->status_mapping & 0xffffffff));
6292
6293         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6294                 /* Status/statistics block address.  See tg3_timer,
6295                  * the tg3_periodic_fetch_stats call there, and
6296                  * tg3_get_stats to see how this works for 5705/5750 chips.
6297                  */
6298                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6299                      ((u64) tp->stats_mapping >> 32));
6300                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6301                      ((u64) tp->stats_mapping & 0xffffffff));
6302                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6303                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6304         }
6305
6306         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6307
6308         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6309         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6310         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6311                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6312
6313         /* Clear statistics/status block in chip, and status block in ram. */
6314         for (i = NIC_SRAM_STATS_BLK;
6315              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6316              i += sizeof(u32)) {
6317                 tg3_write_mem(tp, i, 0);
6318                 udelay(40);
6319         }
6320         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6321
6322         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6323                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6324                 /* reset to prevent losing 1st rx packet intermittently */
6325                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6326                 udelay(10);
6327         }
6328
6329         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6330                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6331         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6332         udelay(40);
6333
6334         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6335          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6336          * register to preserve the GPIO settings for LOMs. The GPIOs,
6337          * whether used as inputs or outputs, are set by boot code after
6338          * reset.
6339          */
6340         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6341                 u32 gpio_mask;
6342
6343                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6344                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6345
6346                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6347                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6348                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6349
6350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6351                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6352
6353                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6354
6355                 /* GPIO1 must be driven high for eeprom write protect */
6356                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6357                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6358         }
6359         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6360         udelay(100);
6361
6362         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6363         tp->last_tag = 0;
6364
6365         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6366                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6367                 udelay(40);
6368         }
6369
6370         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6371                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6372                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6373                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6374                WDMAC_MODE_LNGREAD_ENAB);
6375
6376         /* If statement applies to 5705 and 5750 PCI devices only */
6377         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6378              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6380                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6381                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6382                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6383                         /* nothing */
6384                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6385                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6386                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6387                         val |= WDMAC_MODE_RX_ACCEL;
6388                 }
6389         }
6390
6391         /* Enable host coalescing bug fix */
6392         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6393             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6394                 val |= (1 << 29);
6395
6396         tw32_f(WDMAC_MODE, val);
6397         udelay(40);
6398
6399         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6400                 val = tr32(TG3PCI_X_CAPS);
6401                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6402                         val &= ~PCIX_CAPS_BURST_MASK;
6403                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6404                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6405                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6406                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6407                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6408                                 val |= (tp->split_mode_max_reqs <<
6409                                         PCIX_CAPS_SPLIT_SHIFT);
6410                 }
6411                 tw32(TG3PCI_X_CAPS, val);
6412         }
6413
6414         tw32_f(RDMAC_MODE, rdmac_mode);
6415         udelay(40);
6416
6417         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6418         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6419                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6420         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6421         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6422         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6423         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6424         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6425 #if TG3_TSO_SUPPORT != 0
6426         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6427                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6428 #endif
6429         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6430         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6431
6432         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6433                 err = tg3_load_5701_a0_firmware_fix(tp);
6434                 if (err)
6435                         return err;
6436         }
6437
6438 #if TG3_TSO_SUPPORT != 0
6439         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6440                 err = tg3_load_tso_firmware(tp);
6441                 if (err)
6442                         return err;
6443         }
6444 #endif
6445
6446         tp->tx_mode = TX_MODE_ENABLE;
6447         tw32_f(MAC_TX_MODE, tp->tx_mode);
6448         udelay(100);
6449
6450         tp->rx_mode = RX_MODE_ENABLE;
6451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6452                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6453
6454         tw32_f(MAC_RX_MODE, tp->rx_mode);
6455         udelay(10);
6456
6457         if (tp->link_config.phy_is_low_power) {
6458                 tp->link_config.phy_is_low_power = 0;
6459                 tp->link_config.speed = tp->link_config.orig_speed;
6460                 tp->link_config.duplex = tp->link_config.orig_duplex;
6461                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6462         }
6463
6464         tp->mi_mode = MAC_MI_MODE_BASE;
6465         tw32_f(MAC_MI_MODE, tp->mi_mode);
6466         udelay(80);
6467
6468         tw32(MAC_LED_CTRL, tp->led_ctrl);
6469
6470         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6471         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6472                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6473                 udelay(10);
6474         }
6475         tw32_f(MAC_RX_MODE, tp->rx_mode);
6476         udelay(10);
6477
6478         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6479                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6480                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6481                         /* Set drive transmission level to 1.2V  */
6482                         /* only if the signal pre-emphasis bit is not set  */
6483                         val = tr32(MAC_SERDES_CFG);
6484                         val &= 0xfffff000;
6485                         val |= 0x880;
6486                         tw32(MAC_SERDES_CFG, val);
6487                 }
6488                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6489                         tw32(MAC_SERDES_CFG, 0x616000);
6490         }
6491
6492         /* Prevent chip from dropping frames when flow control
6493          * is enabled.
6494          */
6495         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6496
6497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6498             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6499                 /* Use hardware link auto-negotiation */
6500                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6501         }
6502
6503         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6504             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6505                 u32 tmp;
6506
6507                 tmp = tr32(SERDES_RX_CTRL);
6508                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6509                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6510                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6511                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6512         }
6513
6514         err = tg3_setup_phy(tp, reset_phy);
6515         if (err)
6516                 return err;
6517
6518         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6519                 u32 tmp;
6520
6521                 /* Clear CRC stats. */
6522                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6523                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6524                         tg3_readphy(tp, 0x14, &tmp);
6525                 }
6526         }
6527
6528         __tg3_set_rx_mode(tp->dev);
6529
6530         /* Initialize receive rules. */
6531         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6532         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6533         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6534         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6535
6536         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6537             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6538                 limit = 8;
6539         else
6540                 limit = 16;
6541         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6542                 limit -= 4;
6543         switch (limit) {
6544         case 16:
6545                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6546         case 15:
6547                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6548         case 14:
6549                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6550         case 13:
6551                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6552         case 12:
6553                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6554         case 11:
6555                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6556         case 10:
6557                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6558         case 9:
6559                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6560         case 8:
6561                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6562         case 7:
6563                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6564         case 6:
6565                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6566         case 5:
6567                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6568         case 4:
6569                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6570         case 3:
6571                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6572         case 2:
6573         case 1:
6574
6575         default:
6576                 break;
6577         };
6578
6579         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6580
6581         return 0;
6582 }
6583
6584 /* Called at device open time to get the chip ready for
6585  * packet processing.  Invoked with tp->lock held.
6586  */
6587 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6588 {
6589         int err;
6590
6591         /* Force the chip into D0. */
6592         err = tg3_set_power_state(tp, PCI_D0);
6593         if (err)
6594                 goto out;
6595
6596         tg3_switch_clocks(tp);
6597
6598         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6599
6600         err = tg3_reset_hw(tp, reset_phy);
6601
6602 out:
6603         return err;
6604 }
6605
/* Add the 32-bit value read from hardware register REG into the
 * 64-bit software statistic PSTAT, which is kept as ->low / ->high
 * halves.  If the low-word addition wraps (sum < addend), carry
 * one into the high word.  Multi-statement macro wrapped in
 * do { } while (0) so it is safe in unbraced if/else bodies.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6612
/* Accumulate the chip's 32-bit MAC and receive-list-placement
 * statistics counters into the driver's 64-bit copies in
 * tp->hw_stats (see TG3_STAT_ADD32).  Called from the driver
 * timer; skipped while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6653
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies.
 * On every tick it works around the non-tagged-status interrupt
 * race; once per tp->timer_multiplier ticks it polls link state and
 * fetches statistics; once per tp->asf_multiplier ticks it sends an
 * ASF heartbeat (comments below give the seconds these correspond
 * to under the values set in tg3_open()).
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being synchronized (e.g. a reset is in
	 * progress); don't touch the chip, just re-arm ourselves.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block already updated: force an
			 * interrupt so the handler services it.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Kick host coalescing to refresh the status
			 * block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write-DMA engine lost its enable bit: the chip is
		 * wedged; schedule a full reset from process context
		 * and bail out without re-arming (reset_task restarts
		 * the timer via the RESTART_TIMER flag).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll link changes through the MAC status
			 * register instead of link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier was up but link state changed. */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* Carrier was down but PCS sync / signal
			 * detect appeared.
			 */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before
				 * renegotiating the SERDES link.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.  */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Post the ALIVE2 command in NIC SRAM... */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* ...then raise the RX CPU event (bit 14) to
			 * notify the firmware.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6755
6756 static int tg3_request_irq(struct tg3 *tp)
6757 {
6758         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6759         unsigned long flags;
6760         struct net_device *dev = tp->dev;
6761
6762         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6763                 fn = tg3_msi;
6764                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6765                         fn = tg3_msi_1shot;
6766                 flags = IRQF_SAMPLE_RANDOM;
6767         } else {
6768                 fn = tg3_interrupt;
6769                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6770                         fn = tg3_interrupt_tagged;
6771                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6772         }
6773         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6774 }
6775
/* Verify the chip can deliver an interrupt: temporarily swap in a
 * minimal test ISR, force an interrupt via the coalescing engine,
 * and poll the interrupt mailbox for up to ~50 ms.  The production
 * handler is reinstalled before returning.  Returns 0 on success,
 * -EIO if no interrupt arrived, -ENODEV if the device is down, or
 * a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production ISR for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Tell host coalescing to fire an interrupt right now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll the interrupt mailbox; a non-zero value means the
	 * test ISR ran.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the real interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6822
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	/* Release the MSI vector and disable MSI on the device. */
	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the IRQ; tg3_request_irq() now picks the INTx
	 * handler since USING_MSI is cleared.
	 */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6883
/* net_device open handler: power the chip up, allocate DMA rings,
 * optionally enable MSI, install the IRQ handler, initialize the
 * hardware, verify MSI delivery, arm the watchdog timer, and start
 * the transmit queue.  Each failure path unwinds everything done
 * before it.  Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	tg3_full_lock(tp, 0);

	/* Bring the chip to full power (D0) before touching it. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	/* Try MSI only on 5750-plus parts, excluding 5750 AX/BX
	 * revisions and a 5714 whose peer device is itself —
	 * presumably MSI errata on those; confirm against errata
	 * docs if this list is ever changed.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
	    !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
	      (tp->pdev_peer == tp->pdev))) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* MSI granted: enable it in the chip too. */
			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI setup and DMA allocations on IRQ failure. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Watchdog ticks once a second with tagged status,
		 * ten times a second otherwise (see tg3_timer()).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_counter paces the once-a-second work,
		 * asf_counter the two-second ASF heartbeat.
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		/* Hardware init failed: release IRQ, MSI and DMA. */
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI actually delivers; tg3_test_msi() falls
		 * back to INTx itself and returns non-zero only when
		 * even that failed.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				/* NOTE(review): register 0x7c04 is not
				 * named in tg3.h; setting bit 29 here
				 * appears tied to one-shot MSI mode —
				 * confirm against Broadcom docs.
				 */
				u32 val = tr32(0x7c04);

				tw32(0x7c04, val | (1 << 29));
			}
		}
	}

	tg3_full_lock(tp, 0);

	/* Arm the watchdog, mark init complete, open the interrupt
	 * floodgates, and let the stack start transmitting.
	 */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7015
7016 #if 0
7017 /*static*/ void tg3_dump_state(struct tg3 *tp)
7018 {
7019         u32 val32, val32_2, val32_3, val32_4, val32_5;
7020         u16 val16;
7021         int i;
7022
7023         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7024         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7025         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7026                val16, val32);
7027
7028         /* MAC block */
7029         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7030                tr32(MAC_MODE), tr32(MAC_STATUS));
7031         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7032                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7033         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7034                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7035         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7036                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7037
7038         /* Send data initiator control block */
7039         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7040                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7041         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7042                tr32(SNDDATAI_STATSCTRL));
7043
7044         /* Send data completion control block */
7045         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7046
7047         /* Send BD ring selector block */
7048         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7049                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7050
7051         /* Send BD initiator control block */
7052         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7053                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7054
7055         /* Send BD completion control block */
7056         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7057
7058         /* Receive list placement control block */
7059         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7060                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7061         printk("       RCVLPC_STATSCTRL[%08x]\n",
7062                tr32(RCVLPC_STATSCTRL));
7063
7064         /* Receive data and receive BD initiator control block */
7065         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7066                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7067
7068         /* Receive data completion control block */
7069         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7070                tr32(RCVDCC_MODE));
7071
7072         /* Receive BD initiator control block */
7073         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7074                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7075
7076         /* Receive BD completion control block */
7077         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7078                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7079
7080         /* Receive list selector control block */
7081         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7082                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7083
7084         /* Mbuf cluster free block */
7085         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7086                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7087
7088         /* Host coalescing control block */
7089         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7090                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7091         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7092                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7093                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7094         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7095                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7096                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7097         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7098                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7099         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7100                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7101
7102         /* Memory arbiter control block */
7103         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7104                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7105
7106         /* Buffer manager control block */
7107         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7108                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7109         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7110                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7111         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7112                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7113                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7114                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7115
7116         /* Read DMA control block */
7117         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7118                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7119
7120         /* Write DMA control block */
7121         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7122                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7123
7124         /* DMA completion block */
7125         printk("DEBUG: DMAC_MODE[%08x]\n",
7126                tr32(DMAC_MODE));
7127
7128         /* GRC block */
7129         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7130                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7131         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7132                tr32(GRC_LOCAL_CTRL));
7133
7134         /* TG3_BDINFOs */
7135         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7136                tr32(RCVDBDI_JUMBO_BD + 0x0),
7137                tr32(RCVDBDI_JUMBO_BD + 0x4),
7138                tr32(RCVDBDI_JUMBO_BD + 0x8),
7139                tr32(RCVDBDI_JUMBO_BD + 0xc));
7140         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7141                tr32(RCVDBDI_STD_BD + 0x0),
7142                tr32(RCVDBDI_STD_BD + 0x4),
7143                tr32(RCVDBDI_STD_BD + 0x8),
7144                tr32(RCVDBDI_STD_BD + 0xc));
7145         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7146                tr32(RCVDBDI_MINI_BD + 0x0),
7147                tr32(RCVDBDI_MINI_BD + 0x4),
7148                tr32(RCVDBDI_MINI_BD + 0x8),
7149                tr32(RCVDBDI_MINI_BD + 0xc));
7150
7151         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7152         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7153         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7154         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7155         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7156                val32, val32_2, val32_3, val32_4);
7157
7158         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7159         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7160         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7161         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7162         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7163                val32, val32_2, val32_3, val32_4);
7164
7165         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7166         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7167         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7168         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7169         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7170         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7171                val32, val32_2, val32_3, val32_4, val32_5);
7172
7173         /* SW status block */
7174         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7175                tp->hw_status->status,
7176                tp->hw_status->status_tag,
7177                tp->hw_status->rx_jumbo_consumer,
7178                tp->hw_status->rx_consumer,
7179                tp->hw_status->rx_mini_consumer,
7180                tp->hw_status->idx[0].rx_producer,
7181                tp->hw_status->idx[0].tx_consumer);
7182
7183         /* SW statistics block */
7184         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7185                ((u32 *)tp->hw_stats)[0],
7186                ((u32 *)tp->hw_stats)[1],
7187                ((u32 *)tp->hw_stats)[2],
7188                ((u32 *)tp->hw_stats)[3]);
7189
7190         /* Mailboxes */
7191         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7192                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7193                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7194                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7195                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7196
7197         /* NIC side send descriptors. */
7198         for (i = 0; i < 6; i++) {
7199                 unsigned long txd;
7200
7201                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7202                         + (i * sizeof(struct tg3_tx_buffer_desc));
7203                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7204                        i,
7205                        readl(txd + 0x0), readl(txd + 0x4),
7206                        readl(txd + 0x8), readl(txd + 0xc));
7207         }
7208
7209         /* NIC side RX descriptors. */
7210         for (i = 0; i < 6; i++) {
7211                 unsigned long rxd;
7212
7213                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7214                         + (i * sizeof(struct tg3_rx_buffer_desc));
7215                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7216                        i,
7217                        readl(rxd + 0x0), readl(rxd + 0x4),
7218                        readl(rxd + 0x8), readl(rxd + 0xc));
7219                 rxd += (4 * sizeof(u32));
7220                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7221                        i,
7222                        readl(rxd + 0x0), readl(rxd + 0x4),
7223                        readl(rxd + 0x8), readl(rxd + 0xc));
7224         }
7225
7226         for (i = 0; i < 6; i++) {
7227                 unsigned long rxd;
7228
7229                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7230                         + (i * sizeof(struct tg3_rx_buffer_desc));
7231                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7232                        i,
7233                        readl(rxd + 0x0), readl(rxd + 0x4),
7234                        readl(rxd + 0x8), readl(rxd + 0xc));
7235                 rxd += (4 * sizeof(u32));
7236                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7237                        i,
7238                        readl(rxd + 0x0), readl(rxd + 0x4),
7239                        readl(rxd + 0x8), readl(rxd + 0xc));
7240         }
7241 }
7242 #endif
7243
7244 static struct net_device_stats *tg3_get_stats(struct net_device *);
7245 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7246
/* net_device close (ifdown) handler: quiesce the chip, release IRQ
 * resources, snapshot the statistics counters, and power down.
 * Runs under rtnl_lock.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	/* Halt the chip and free the rings while both locks are held. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the accumulated counters into the *_prev copies so
	 * tg3_get_stats()/tg3_get_estats() keep reporting them across
	 * close/open cycles.  This must happen before the hw_stats
	 * block is freed below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7296
7297 static inline unsigned long get_stat64(tg3_stat64_t *val)
7298 {
7299         unsigned long ret;
7300
7301 #if (BITS_PER_LONG == 32)
7302         ret = val->low;
7303 #else
7304         ret = ((u64)val->high << 32) | ((u64)val->low);
7305 #endif
7306         return ret;
7307 }
7308
/* Return the cumulative RX CRC error count.  On 5700/5701 copper parts
 * the count is accumulated in software (tp->phy_crc_errors) from a PHY
 * counter register; all other parts report the hardware statistics
 * block's FCS error count.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		/* NOTE(review): 0x1e/0x14 appear to be Broadcom PHY
		 * test/CRC-counter registers; setting bit 15 of 0x1e
		 * presumably exposes the counter at 0x14 -- confirm
		 * against the PHY datasheet before touching this.
		 */
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;	/* PHY read failed; count nothing */
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7333
/* Accumulate one hardware counter into the ethtool snapshot:
 * new value = value saved at last close + live hardware count.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Rebuild tp->estats from the saved pre-close snapshot (estats_prev)
 * plus the live hardware statistics block.  Returns the previous
 * snapshot unchanged when the stats block is not mapped (device has
 * never been opened).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator state counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing state counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7425
/* net_device get_stats handler: fold the live hardware counters into
 * net_device_stats.  Each field is "value saved at last close
 * (net_stats_prev) + current hardware count", so counters survive
 * close/open cycles.  Returns the previous snapshot when the stats
 * block is not mapped (device never opened).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from a software-maintained PHY counter on
	 * some chips rather than the stats block.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7485
7486 static inline u32 calc_crc(unsigned char *buf, int len)
7487 {
7488         u32 reg;
7489         u32 tmp;
7490         int j, k;
7491
7492         reg = 0xffffffff;
7493
7494         for (j = 0; j < len; j++) {
7495                 reg ^= buf[j];
7496
7497                 for (k = 0; k < 8; k++) {
7498                         tmp = reg & 0x01;
7499
7500                         reg >>= 1;
7501
7502                         if (tmp) {
7503                                 reg ^= 0xedb88320;
7504                         }
7505                 }
7506         }
7507
7508         return ~reg;
7509 }
7510
7511 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7512 {
7513         /* accept or reject all multicast frames */
7514         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7515         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7516         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7517         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7518 }
7519
/* Recompute the chip's RX filtering (promiscuous flag, VLAN tag
 * stripping, multicast hash) from dev->flags and the multicast list.
 * Caller must hold the tg3 full lock; the MAC_RX_MODE register is only
 * rewritten when the computed mode actually changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* The low 7 bits of the inverted CRC-32 of each address
		 * select one of the 128 hash-filter bits: bits 6:5 pick
		 * the register, bits 4:0 the bit within it.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7583
/* net_device set_rx_mode entry point: take the tg3 full lock and
 * apply the new RX filtering.  A no-op while the interface is down;
 * tg3_open reprograms the filters anyway.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7595
7596 #define TG3_REGDUMP_LEN         (32 * 1024)
7597
/* ethtool get_regs_len: the register dump is a fixed-size 32KB image. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7602
/* ethtool get_regs: dump the chip's register space into the 32KB
 * buffer at _p.  Each register is stored at its native offset so the
 * dump's layout mirrors the register map; regions that are not read
 * remain zero from the initial memset.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Don't touch the chip while it is powered down; return the
	 * all-zero dump instead.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Append one register at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at the block's native offset, then copy a
 * "len"-byte run of registers starting at "base".
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Copy a single isolated register at its native offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful on parts with NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7675
/* ethtool get_eeprom_len: report the NVRAM size recorded in
 * tp->nvram_size.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7682
7683 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7684 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7685
/* ethtool get_eeprom: copy eeprom->len bytes starting at eeprom->offset
 * out of NVRAM into "data".  NVRAM is read one 32-bit word at a time,
 * so unaligned head and tail bytes are sliced out of whole words.
 * eeprom->len is updated to the number of bytes actually copied;
 * returns 0 or a negative errno from the NVRAM read.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* store the word little-endian so byte extraction below
		 * follows the NVRAM byte stream order
		 */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* report how many bytes made it, then fail */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7747
7748 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7749
7750 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7751 {
7752         struct tg3 *tp = netdev_priv(dev);
7753         int ret;
7754         u32 offset, len, b_offset, odd_len, start, end;
7755         u8 *buf;
7756
7757         if (tp->link_config.phy_is_low_power)
7758                 return -EAGAIN;
7759
7760         if (eeprom->magic != TG3_EEPROM_MAGIC)
7761                 return -EINVAL;
7762
7763         offset = eeprom->offset;
7764         len = eeprom->len;
7765
7766         if ((b_offset = (offset & 3))) {
7767                 /* adjustments to start on required 4 byte boundary */
7768                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7769                 if (ret)
7770                         return ret;
7771                 start = cpu_to_le32(start);
7772                 len += b_offset;
7773                 offset &= ~3;
7774                 if (len < 4)
7775                         len = 4;
7776         }
7777
7778         odd_len = 0;
7779         if (len & 3) {
7780                 /* adjustments to end on required 4 byte boundary */
7781                 odd_len = 1;
7782                 len = (len + 3) & ~3;
7783                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7784                 if (ret)
7785                         return ret;
7786                 end = cpu_to_le32(end);
7787         }
7788
7789         buf = data;
7790         if (b_offset || odd_len) {
7791                 buf = kmalloc(len, GFP_KERNEL);
7792                 if (buf == 0)
7793                         return -ENOMEM;
7794                 if (b_offset)
7795                         memcpy(buf, &start, 4);
7796                 if (odd_len)
7797                         memcpy(buf+len-4, &end, 4);
7798                 memcpy(buf + b_offset, data, eeprom->len);
7799         }
7800
7801         ret = tg3_nvram_write_block(tp, offset, len, buf);
7802
7803         if (buf != data)
7804                 kfree(buf);
7805
7806         return ret;
7807 }
7808
/* ethtool get_settings: report supported link modes, the current
 * advertisement mask, and -- when the interface is up -- the active
 * speed and duplex.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes, unless this is a 10/100-only part. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		/* Copper PHY: 10/100 modes over MII. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7843   
7844 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7845 {
7846         struct tg3 *tp = netdev_priv(dev);
7847   
7848         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7849                 /* These are the only valid advertisement bits allowed.  */
7850                 if (cmd->autoneg == AUTONEG_ENABLE &&
7851                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7852                                           ADVERTISED_1000baseT_Full |
7853                                           ADVERTISED_Autoneg |
7854                                           ADVERTISED_FIBRE)))
7855                         return -EINVAL;
7856                 /* Fiber can only do SPEED_1000.  */
7857                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7858                          (cmd->speed != SPEED_1000))
7859                         return -EINVAL;
7860         /* Copper cannot force SPEED_1000.  */
7861         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7862                    (cmd->speed == SPEED_1000))
7863                 return -EINVAL;
7864         else if ((cmd->speed == SPEED_1000) &&
7865                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7866                 return -EINVAL;
7867
7868         tg3_full_lock(tp, 0);
7869
7870         tp->link_config.autoneg = cmd->autoneg;
7871         if (cmd->autoneg == AUTONEG_ENABLE) {
7872                 tp->link_config.advertising = cmd->advertising;
7873                 tp->link_config.speed = SPEED_INVALID;
7874                 tp->link_config.duplex = DUPLEX_INVALID;
7875         } else {
7876                 tp->link_config.advertising = 0;
7877                 tp->link_config.speed = cmd->speed;
7878                 tp->link_config.duplex = cmd->duplex;
7879         }
7880   
7881         if (netif_running(dev))
7882                 tg3_setup_phy(tp, 1);
7883
7884         tg3_full_unlock(tp);
7885   
7886         return 0;
7887 }
7888   
7889 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7890 {
7891         struct tg3 *tp = netdev_priv(dev);
7892   
7893         strcpy(info->driver, DRV_MODULE_NAME);
7894         strcpy(info->version, DRV_MODULE_VERSION);
7895         strcpy(info->fw_version, tp->fw_ver);
7896         strcpy(info->bus_info, pci_name(tp->pdev));
7897 }
7898   
7899 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7900 {
7901         struct tg3 *tp = netdev_priv(dev);
7902   
7903         wol->supported = WAKE_MAGIC;
7904         wol->wolopts = 0;
7905         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7906                 wol->wolopts = WAKE_MAGIC;
7907         memset(&wol->sopass, 0, sizeof(wol->sopass));
7908 }
7909   
7910 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7911 {
7912         struct tg3 *tp = netdev_priv(dev);
7913   
7914         if (wol->wolopts & ~WAKE_MAGIC)
7915                 return -EINVAL;
7916         if ((wol->wolopts & WAKE_MAGIC) &&
7917             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7918             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7919                 return -EINVAL;
7920   
7921         spin_lock_bh(&tp->lock);
7922         if (wol->wolopts & WAKE_MAGIC)
7923                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7924         else
7925                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7926         spin_unlock_bh(&tp->lock);
7927   
7928         return 0;
7929 }
7930   
/* ethtool get_msglevel: return the driver's netif message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7936   
/* ethtool set_msglevel: store the new netif message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7942   
#if TG3_TSO_SUPPORT != 0
/* ethtool TSO toggle.  Chips without TSO capability may only keep the
 * feature off; HW_TSO_2 parts toggle NETIF_F_TSO6 together with the
 * generic TSO flag before delegating to the common ethtool helper.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7962   
/* ethtool -r: restart autonegotiation on the copper PHY.
 * Returns -EAGAIN if the interface is down, -EINVAL on SerDes links or
 * when autoneg is disabled and no parallel-detect is in progress.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Autoneg restart via BMCR only applies to copper PHYs. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice back-to-back; the
	 * first read presumably discards a stale/latched value — confirm
	 * against PHY errata before collapsing this into one read.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick autoneg while preserving the other BMCR bits. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7989   
7990 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7991 {
7992         struct tg3 *tp = netdev_priv(dev);
7993   
7994         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7995         ering->rx_mini_max_pending = 0;
7996         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7997                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7998         else
7999                 ering->rx_jumbo_max_pending = 0;
8000
8001         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8002
8003         ering->rx_pending = tp->rx_pending;
8004         ering->rx_mini_pending = 0;
8005         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8006                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8007         else
8008                 ering->rx_jumbo_pending = 0;
8009
8010         ering->tx_pending = tp->tx_pending;
8011 }
8012   
/* ethtool -G: resize the standard rx, jumbo rx and tx rings.  If the
 * interface is running, traffic is quiesced, the chip is halted, and
 * the hardware is restarted with the new sizes.
 * Returns 0 or a negative errno from the hardware restart.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject sizes larger than the hardware rings can hold. */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Stop tx/NAPI first; the subsequent lock then syncs irqs. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* TG3_FLG2_MAX_RXPEND_64 parts are clamped to 63 standard
	 * rx descriptors.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Halt and reinit so the new ring sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8049   
8050 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8051 {
8052         struct tg3 *tp = netdev_priv(dev);
8053   
8054         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8055         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8056         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8057 }
8058   
/* ethtool -A: update flow-control autoneg and rx/tx pause flags, then
 * restart the hardware (if running) so the new settings take effect.
 * Returns 0 or a negative errno from the hardware restart.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		/* Stop tx/NAPI first; the subsequent lock then syncs irqs. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	/* Mirror the requested settings into the driver flag word. */
	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		/* Halt and reinit so the new pause config is programmed. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8095   
8096 static u32 tg3_get_rx_csum(struct net_device *dev)
8097 {
8098         struct tg3 *tp = netdev_priv(dev);
8099         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8100 }
8101   
8102 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8103 {
8104         struct tg3 *tp = netdev_priv(dev);
8105   
8106         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8107                 if (data != 0)
8108                         return -EINVAL;
8109                 return 0;
8110         }
8111   
8112         spin_lock_bh(&tp->lock);
8113         if (data)
8114                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8115         else
8116                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8117         spin_unlock_bh(&tp->lock);
8118   
8119         return 0;
8120 }
8121   
8122 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8123 {
8124         struct tg3 *tp = netdev_priv(dev);
8125   
8126         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8127                 if (data != 0)
8128                         return -EINVAL;
8129                 return 0;
8130         }
8131   
8132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8133             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8134                 ethtool_op_set_tx_hw_csum(dev, data);
8135         else
8136                 ethtool_op_set_tx_csum(dev, data);
8137
8138         return 0;
8139 }
8140
8141 static int tg3_get_stats_count (struct net_device *dev)
8142 {
8143         return TG3_NUM_STATS;
8144 }
8145
8146 static int tg3_get_test_count (struct net_device *dev)
8147 {
8148         return TG3_NUM_TEST;
8149 }
8150
8151 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8152 {
8153         switch (stringset) {
8154         case ETH_SS_STATS:
8155                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8156                 break;
8157         case ETH_SS_TEST:
8158                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8159                 break;
8160         default:
8161                 WARN_ON(1);     /* we need a WARN() */
8162                 break;
8163         }
8164 }
8165
8166 static int tg3_phys_id(struct net_device *dev, u32 data)
8167 {
8168         struct tg3 *tp = netdev_priv(dev);
8169         int i;
8170
8171         if (!netif_running(tp->dev))
8172                 return -EAGAIN;
8173
8174         if (data == 0)
8175                 data = 2;
8176
8177         for (i = 0; i < (data * 2); i++) {
8178                 if ((i % 2) == 0)
8179                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8180                                            LED_CTRL_1000MBPS_ON |
8181                                            LED_CTRL_100MBPS_ON |
8182                                            LED_CTRL_10MBPS_ON |
8183                                            LED_CTRL_TRAFFIC_OVERRIDE |
8184                                            LED_CTRL_TRAFFIC_BLINK |
8185                                            LED_CTRL_TRAFFIC_LED);
8186         
8187                 else
8188                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8189                                            LED_CTRL_TRAFFIC_OVERRIDE);
8190
8191                 if (msleep_interruptible(500))
8192                         break;
8193         }
8194         tw32(MAC_LED_CTRL, tp->led_ctrl);
8195         return 0;
8196 }
8197
8198 static void tg3_get_ethtool_stats (struct net_device *dev,
8199                                    struct ethtool_stats *estats, u64 *tmp_stats)
8200 {
8201         struct tg3 *tp = netdev_priv(dev);
8202         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8203 }
8204
8205 #define NVRAM_TEST_SIZE 0x100
8206 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8207
/* Self-test: validate the NVRAM contents.
 * Recognizes two layouts by the magic word at offset 0: the standard
 * EEPROM image (CRC-checked bootstrap and manufacturing blocks) and a
 * selfboot image (byte-sum checksum over the whole image).
 * Returns 0 on success, -EIO on read failure or checksum mismatch,
 * -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Select how much of the NVRAM to validate from the magic word. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		/* Selfboot image; only format-1 variants are checked. */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	/* Read the image into buf as little-endian 32-bit words. */
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		/* Selfboot images checksum to zero over every byte. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8273
8274 #define TG3_SERDES_TIMEOUT_SEC  2
8275 #define TG3_COPPER_TIMEOUT_SEC  6
8276
8277 static int tg3_test_link(struct tg3 *tp)
8278 {
8279         int i, max;
8280
8281         if (!netif_running(tp->dev))
8282                 return -ENODEV;
8283
8284         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8285                 max = TG3_SERDES_TIMEOUT_SEC;
8286         else
8287                 max = TG3_COPPER_TIMEOUT_SEC;
8288
8289         for (i = 0; i < max; i++) {
8290                 if (netif_carrier_ok(tp->dev))
8291                         return 0;
8292
8293                 if (msleep_interruptible(1000))
8294                         break;
8295         }
8296
8297         return -EIO;
8298 }
8299
/* Only test the commonly used registers */
/* Self-test: for each table entry applicable to this chip, verify that
 * writing 0 and then all-ones to the register leaves the read-only bits
 * (read_mask) unchanged and makes the read/write bits (write_mask)
 * track the written value.  The original register value is restored
 * after each probe.  Returns 0 on success, -EIO on the first failure.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;	/* register address */
		u16 flags;	/* TG3_FL_* applicability bits below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;	/* bits that must not change on writes */
		u32 write_mask;	/* bits that must follow written values */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8512
8513 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8514 {
8515         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8516         int i;
8517         u32 j;
8518
8519         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8520                 for (j = 0; j < len; j += 4) {
8521                         u32 val;
8522
8523                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8524                         tg3_read_mem(tp, offset + j, &val);
8525                         if (val != test_pattern[i])
8526                                 return -EIO;
8527                 }
8528         }
8529         return 0;
8530 }
8531
8532 static int tg3_test_memory(struct tg3 *tp)
8533 {
8534         static struct mem_entry {
8535                 u32 offset;
8536                 u32 len;
8537         } mem_tbl_570x[] = {
8538                 { 0x00000000, 0x00b50},
8539                 { 0x00002000, 0x1c000},
8540                 { 0xffffffff, 0x00000}
8541         }, mem_tbl_5705[] = {
8542                 { 0x00000100, 0x0000c},
8543                 { 0x00000200, 0x00008},
8544                 { 0x00004000, 0x00800},
8545                 { 0x00006000, 0x01000},
8546                 { 0x00008000, 0x02000},
8547                 { 0x00010000, 0x0e000},
8548                 { 0xffffffff, 0x00000}
8549         }, mem_tbl_5755[] = {
8550                 { 0x00000200, 0x00008},
8551                 { 0x00004000, 0x00800},
8552                 { 0x00006000, 0x00800},
8553                 { 0x00008000, 0x02000},
8554                 { 0x00010000, 0x0c000},
8555                 { 0xffffffff, 0x00000}
8556         };
8557         struct mem_entry *mem_tbl;
8558         int err = 0;
8559         int i;
8560
8561         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8562                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8563                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8564                         mem_tbl = mem_tbl_5755;
8565                 else
8566                         mem_tbl = mem_tbl_5705;
8567         } else
8568                 mem_tbl = mem_tbl_570x;
8569
8570         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8571                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8572                     mem_tbl[i].len)) != 0)
8573                         break;
8574         }
8575         
8576         return err;
8577 }
8578
8579 #define TG3_MAC_LOOPBACK        0
8580 #define TG3_PHY_LOOPBACK        1
8581
/* Self-test helper: send one frame through MAC-internal or PHY loopback
 * and verify it comes back intact on the standard rx ring.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the tx
 * skb cannot be allocated, -EIO when the frame is lost or corrupted.
 * Caller is expected to have reset the hardware beforehand; rx buffers
 * are left for tg3_free_rings to clean up.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Put the PHY in loopback at 1000 Mb/s full duplex. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		/* BCM5401 needs different link-polarity/LED handling. */
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a full-size test frame: dst MAC = own address, then a
	 * deterministic byte pattern in the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the status block is current. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Post the frame and ring the tx doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll (up to 10 tries) until the frame is sent and received. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the rx descriptor: must come from the standard ring,
	 * carry no errors, and match the transmitted length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8715
8716 #define TG3_MAC_LOOPBACK_FAILED         1
8717 #define TG3_PHY_LOOPBACK_FAILED         2
8718 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8719                                          TG3_PHY_LOOPBACK_FAILED)
8720
8721 static int tg3_test_loopback(struct tg3 *tp)
8722 {
8723         int err = 0;
8724
8725         if (!netif_running(tp->dev))
8726                 return TG3_LOOPBACK_FAILED;
8727
8728         err = tg3_reset_hw(tp, 1);
8729         if (err)
8730                 return TG3_LOOPBACK_FAILED;
8731
8732         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8733                 err |= TG3_MAC_LOOPBACK_FAILED;
8734         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8735                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8736                         err |= TG3_PHY_LOOPBACK_FAILED;
8737         }
8738
8739         return err;
8740 }
8741
/* ethtool self-test entry point.  Result slots in data[]:
 *   [0] nvram, [1] link, [2] registers, [3] memory,
 *   [4] loopback bitmask, [5] interrupt.  Nonzero = failed.
 * Online tests (nvram, link) always run; the destructive register,
 * memory, loopback and interrupt tests run only for an offline test
 * and require halting and restarting the chip.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip before touching it if it is in low power. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			/* Quiesce traffic before taking the full lock. */
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs so the destructive
		 * tests own the hardware; hold the NVRAM lock across the
		 * CPU halts.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] is the loopback failure bitmask itself. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs interrupts enabled, so drop
		 * the full lock around it.
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the destructive tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return the chip to low power if that is where it started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8814
/*
 * MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Returns -EOPNOTSUPP for SerDes devices (no MII PHY) and for any
 * unrecognized command; -EAGAIN while the PHY is in low-power state.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Defer MII access while the PHY is powered down. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* tp->lock serializes MDIO access with the driver core. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Register writes require admin privilege; reads do not. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8866
#if TG3_VLAN_TAG_USED
/*
 * Attach (or detach, grp == NULL) the VLAN group.  The interface is
 * quiesced around the update so the RX-mode change does not race the
 * interrupt handler.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}

/* Remove one VLAN id from the group, under the full lock. */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
#endif
8904
8905 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8906 {
8907         struct tg3 *tp = netdev_priv(dev);
8908
8909         memcpy(ec, &tp->coal, sizeof(*ec));
8910         return 0;
8911 }
8912
8913 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8914 {
8915         struct tg3 *tp = netdev_priv(dev);
8916         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8917         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8918
8919         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8920                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8921                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8922                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8923                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8924         }
8925
8926         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8927             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8928             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8929             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8930             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8931             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8932             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8933             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8934             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8935             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8936                 return -EINVAL;
8937
8938         /* No rx interrupts will be generated if both are zero */
8939         if ((ec->rx_coalesce_usecs == 0) &&
8940             (ec->rx_max_coalesced_frames == 0))
8941                 return -EINVAL;
8942
8943         /* No tx interrupts will be generated if both are zero */
8944         if ((ec->tx_coalesce_usecs == 0) &&
8945             (ec->tx_max_coalesced_frames == 0))
8946                 return -EINVAL;
8947
8948         /* Only copy relevant parameters, ignore all others. */
8949         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8950         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8951         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8952         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8953         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8954         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8955         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8956         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8957         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8958
8959         if (netif_running(dev)) {
8960                 tg3_full_lock(tp, 0);
8961                 __tg3_set_coalesce(tp, &tp->coal);
8962                 tg3_full_unlock(tp);
8963         }
8964         return 0;
8965 }
8966
/* ethtool method table wiring the tg3_* handlers into the net core. */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	/* TSO hooks only exist when the kernel provides NETIF_F_TSO. */
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
9006
9007 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9008 {
9009         u32 cursize, val, magic;
9010
9011         tp->nvram_size = EEPROM_CHIP_SIZE;
9012
9013         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9014                 return;
9015
9016         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9017                 return;
9018
9019         /*
9020          * Size the chip by reading offsets at increasing powers of two.
9021          * When we encounter our validation signature, we know the addressing
9022          * has wrapped around, and thus have our chip size.
9023          */
9024         cursize = 0x10;
9025
9026         while (cursize < tp->nvram_size) {
9027                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9028                         return;
9029
9030                 if (val == magic)
9031                         break;
9032
9033                 cursize <<= 1;
9034         }
9035
9036         tp->nvram_size = cursize;
9037 }
9038                 
9039 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9040 {
9041         u32 val;
9042
9043         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9044                 return;
9045
9046         /* Selfboot format */
9047         if (val != TG3_EEPROM_MAGIC) {
9048                 tg3_get_eeprom_size(tp);
9049                 return;
9050         }
9051
9052         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9053                 if (val != 0) {
9054                         tp->nvram_size = (val >> 16) * 1024;
9055                         return;
9056                 }
9057         }
9058         tp->nvram_size = 0x20000;
9059 }
9060
/*
 * Decode NVRAM_CFG1 for pre-5752 devices: set flash/buffered flags,
 * vendor (JEDEC id) and page size.  Only 5750 and 5780-class chips
 * carry a vendor code in NVRAM_CFG1; everything else defaults to
 * buffered Atmel AT45DB0X1B.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear compatibility-bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* NOTE(review): an unknown vendor code leaves
		 * nvram_jedecnum/pagesize untouched -- verify callers
		 * tolerate that. */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Default for chips without a vendor code in NVRAM_CFG1. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9113
/*
 * Decode NVRAM_CFG1 for the 5752: vendor, buffered/flash flags and
 * page size.  Flash parts encode the page size directly in NVRAM_CFG1;
 * EEPROMs use a fixed "whole chip" page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts: page size is encoded in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9174
/*
 * Decode NVRAM_CFG1 for the 5755: vendor, buffered/flash flags and a
 * fixed page size per device family.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM: disable compatibility-bypass mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9215
/*
 * Decode NVRAM_CFG1 for the 5787: same structure as the 5755 decoder
 * but with the 5787 vendor codes and no TPM-protection bit check.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM: disable compatibility-bypass mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9253
9254 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9255 static void __devinit tg3_nvram_init(struct tg3 *tp)
9256 {
9257         int j;
9258
9259         tw32_f(GRC_EEPROM_ADDR,
9260              (EEPROM_ADDR_FSM_RESET |
9261               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9262                EEPROM_ADDR_CLKPERD_SHIFT)));
9263
9264         /* XXX schedule_timeout() ... */
9265         for (j = 0; j < 100; j++)
9266                 udelay(10);
9267
9268         /* Enable seeprom accesses. */
9269         tw32_f(GRC_LOCAL_CTRL,
9270              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9271         udelay(100);
9272
9273         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9274             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9275                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9276
9277                 if (tg3_nvram_lock(tp)) {
9278                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9279                                "tg3_nvram_init failed.\n", tp->dev->name);
9280                         return;
9281                 }
9282                 tg3_enable_nvram_access(tp);
9283
9284                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9285                         tg3_get_5752_nvram_info(tp);
9286                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9287                         tg3_get_5755_nvram_info(tp);
9288                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9289                         tg3_get_5787_nvram_info(tp);
9290                 else
9291                         tg3_get_nvram_info(tp);
9292
9293                 tg3_get_nvram_size(tp);
9294
9295                 tg3_disable_nvram_access(tp);
9296                 tg3_nvram_unlock(tp);
9297
9298         } else {
9299                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9300
9301                 tg3_get_eeprom_size(tp);
9302         }
9303 }
9304
/*
 * Read one 32-bit word via the legacy GRC EEPROM interface (used when
 * TG3_FLAG_NVRAM is not set).  Returns 0 on success, -EINVAL for a
 * misaligned or out-of-range offset, -EBUSY on completion timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the register's other bits; clear address/devid/read. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll up to ~1s (10000 * 100us) for the read to complete. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9338
9339 #define NVRAM_CMD_TIMEOUT 10000
9340
9341 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9342 {
9343         int i;
9344
9345         tw32(NVRAM_CMD, nvram_cmd);
9346         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9347                 udelay(10);
9348                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9349                         udelay(10);
9350                         break;
9351                 }
9352         }
9353         if (i == NVRAM_CMD_TIMEOUT) {
9354                 return -EBUSY;
9355         }
9356         return 0;
9357 }
9358
9359 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9360 {
9361         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9362             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9363             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9364             (tp->nvram_jedecnum == JEDEC_ATMEL))
9365
9366                 addr = ((addr / tp->nvram_pagesize) <<
9367                         ATMEL_AT45DB0X1B_PAGE_POS) +
9368                        (addr % tp->nvram_pagesize);
9369
9370         return addr;
9371 }
9372
9373 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9374 {
9375         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9376             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9377             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9378             (tp->nvram_jedecnum == JEDEC_ATMEL))
9379
9380                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9381                         tp->nvram_pagesize) +
9382                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9383
9384         return addr;
9385 }
9386
/*
 * Read one 32-bit word from NVRAM at byte offset 'offset'.  Falls back
 * to the legacy EEPROM interface when TG3_FLAG_NVRAM is not set.
 * Returns 0 on success (with *val set) or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's page:offset layout if needed. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate with firmware for the NVRAM interface. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data register is byte-swapped relative to CPU order here. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9418
9419 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9420 {
9421         int err;
9422         u32 tmp;
9423
9424         err = tg3_nvram_read(tp, offset, &tmp);
9425         *val = swab32(tmp);
9426         return err;
9427 }
9428
/*
 * Write a block via the legacy GRC EEPROM interface, one 32-bit word
 * at a time.  offset and len are dword aligned (caller's contract).
 * Returns 0 on success, -EBUSY if a word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* Data register takes the word little-endian. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* NOTE(review): presumably writing COMPLETE back clears
		 * the status bit (write-1-to-clear) -- confirm against
		 * the register spec. */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);
		
		/* Poll up to ~1s (10000 * 100us) for completion. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9470
/* offset and length are dword aligned */
/*
 * Write to unbuffered flash using a read-modify-erase-write cycle on
 * whole pages: read the target page into a kmalloc'd scratch buffer,
 * merge the caller's data, erase the page, then program it back one
 * word at a time.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;
	
		/* Read the current page so untouched words survive. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image.
		 * NOTE(review): 'buf' is never advanced, so a write
		 * spanning multiple pages would re-copy the first bytes
		 * of buf into every page -- verify callers never span
		 * a page boundary, or this needs a buf += size. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		/* NOTE(review): the exec_cmd failures below break out
		 * with ret still 0, so the caller sees success -- worth
		 * confirming this best-effort behavior is intended. */
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the page back one word at a time; FIRST/LAST
		 * frame the page-sized burst. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always finish with a write-disable command. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9566
/* offset and length are dword aligned */
/*
 * Write to buffered flash or EEPROM one dword per NVRAM command.
 * NVRAM_CMD_FIRST/LAST frame each flash page and the overall
 * transfer; EEPROM parts always get FIRST|LAST per word.  Returns 0
 * on success or a negative errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		/* Word is presented big-endian to the data register. */
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Convert to the device's page:offset addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page start (or transfer start), LAST at a
		 * page end or at the final word of the transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Pre-5752 ST parts need an explicit write-enable before
		 * each page is started. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9617
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily lifts the hardware
 * write protect, dispatches to the EEPROM / buffered / unbuffered
 * write helper appropriate for the attached part, then restores the
 * protect state.  Returns 0 on success or a negative error.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop GPIO1, which apparently gates the EEPROM write-protect
	 * line on protected boards, for the duration of the write.
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		/* No NVRAM interface: fall back to raw EEPROM access. */
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* Serialize NVRAM access against the firmware. */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
			!(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Writes are rejected unless GRC_MODE_NVRAM_WR_ENABLE
		 * is set; restore the original mode afterwards.
		 */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert the write-protect GPIO state. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9672
/* Maps a board's PCI subsystem vendor/device pair to the PHY known to
 * be fitted on that board.  Used as a fallback by tg3_phy_probe() when
 * neither the PHY ID registers nor the EEPROM yield a usable PHY ID.
 * phy_id == 0 marks boards with a SerDes/fiber interface and no
 * identifiable copper PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9715
9716 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9717 {
9718         int i;
9719
9720         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9721                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9722                      tp->pdev->subsystem_vendor) &&
9723                     (subsys_id_to_phy_id[i].subsys_devid ==
9724                      tp->pdev->subsystem_device))
9725                         return &subsys_id_to_phy_id[i];
9726         }
9727         return NULL;
9728 }
9729
9730 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9731 {
9732         u32 val;
9733         u16 pmcsr;
9734
9735         /* On some early chips the SRAM cannot be accessed in D3hot state,
9736          * so need make sure we're in D0.
9737          */
9738         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9739         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9740         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9741         msleep(1);
9742
9743         /* Make sure register accesses (indirect or otherwise)
9744          * will function correctly.
9745          */
9746         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9747                                tp->misc_host_ctrl);
9748
9749         /* The memory arbiter has to be enabled in order for SRAM accesses
9750          * to succeed.  Normally on powerup the tg3 chip firmware will make
9751          * sure it is enabled, but other entities such as system netboot
9752          * code might disable it.
9753          */
9754         val = tr32(MEMARB_MODE);
9755         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9756
9757         tp->phy_id = PHY_ID_INVALID;
9758         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9759
9760         /* Assume an onboard device by default.  */
9761         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9762
9763         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9764         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9765                 u32 nic_cfg, led_cfg;
9766                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9767                 int eeprom_phy_serdes = 0;
9768
9769                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9770                 tp->nic_sram_data_cfg = nic_cfg;
9771
9772                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9773                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9774                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9775                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9776                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9777                     (ver > 0) && (ver < 0x100))
9778                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9779
9780                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9781                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9782                         eeprom_phy_serdes = 1;
9783
9784                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9785                 if (nic_phy_id != 0) {
9786                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9787                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9788
9789                         eeprom_phy_id  = (id1 >> 16) << 10;
9790                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9791                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9792                 } else
9793                         eeprom_phy_id = 0;
9794
9795                 tp->phy_id = eeprom_phy_id;
9796                 if (eeprom_phy_serdes) {
9797                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9798                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9799                         else
9800                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9801                 }
9802
9803                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9804                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9805                                     SHASTA_EXT_LED_MODE_MASK);
9806                 else
9807                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9808
9809                 switch (led_cfg) {
9810                 default:
9811                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9812                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9813                         break;
9814
9815                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9816                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9817                         break;
9818
9819                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9820                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9821
9822                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9823                          * read on some older 5700/5701 bootcode.
9824                          */
9825                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9826                             ASIC_REV_5700 ||
9827                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9828                             ASIC_REV_5701)
9829                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9830
9831                         break;
9832
9833                 case SHASTA_EXT_LED_SHARED:
9834                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9835                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9836                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9837                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9838                                                  LED_CTRL_MODE_PHY_2);
9839                         break;
9840
9841                 case SHASTA_EXT_LED_MAC:
9842                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9843                         break;
9844
9845                 case SHASTA_EXT_LED_COMBO:
9846                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9847                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9848                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9849                                                  LED_CTRL_MODE_PHY_2);
9850                         break;
9851
9852                 };
9853
9854                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9855                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9856                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9857                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9858
9859                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9860                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9861                 else
9862                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9863
9864                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9865                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9866                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9867                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9868                 }
9869                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9870                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9871
9872                 if (cfg2 & (1 << 17))
9873                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9874
9875                 /* serdes signal pre-emphasis in register 0x590 set by */
9876                 /* bootcode if bit 18 is set */
9877                 if (cfg2 & (1 << 18))
9878                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9879         }
9880 }
9881
9882 static int __devinit tg3_phy_probe(struct tg3 *tp)
9883 {
9884         u32 hw_phy_id_1, hw_phy_id_2;
9885         u32 hw_phy_id, hw_phy_id_masked;
9886         int err;
9887
9888         /* Reading the PHY ID register can conflict with ASF
9889          * firwmare access to the PHY hardware.
9890          */
9891         err = 0;
9892         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9893                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9894         } else {
9895                 /* Now read the physical PHY_ID from the chip and verify
9896                  * that it is sane.  If it doesn't look good, we fall back
9897                  * to either the hard-coded table based PHY_ID and failing
9898                  * that the value found in the eeprom area.
9899                  */
9900                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9901                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9902
9903                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9904                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9905                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9906
9907                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9908         }
9909
9910         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9911                 tp->phy_id = hw_phy_id;
9912                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9913                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9914                 else
9915                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9916         } else {
9917                 if (tp->phy_id != PHY_ID_INVALID) {
9918                         /* Do nothing, phy ID already set up in
9919                          * tg3_get_eeprom_hw_cfg().
9920                          */
9921                 } else {
9922                         struct subsys_tbl_ent *p;
9923
9924                         /* No eeprom signature?  Try the hardcoded
9925                          * subsys device table.
9926                          */
9927                         p = lookup_by_subsys(tp);
9928                         if (!p)
9929                                 return -ENODEV;
9930
9931                         tp->phy_id = p->phy_id;
9932                         if (!tp->phy_id ||
9933                             tp->phy_id == PHY_ID_BCM8002)
9934                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9935                 }
9936         }
9937
9938         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9939             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9940                 u32 bmsr, adv_reg, tg3_ctrl;
9941
9942                 tg3_readphy(tp, MII_BMSR, &bmsr);
9943                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9944                     (bmsr & BMSR_LSTATUS))
9945                         goto skip_phy_reset;
9946                     
9947                 err = tg3_phy_reset(tp);
9948                 if (err)
9949                         return err;
9950
9951                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9952                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9953                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9954                 tg3_ctrl = 0;
9955                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9956                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9957                                     MII_TG3_CTRL_ADV_1000_FULL);
9958                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9959                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9960                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9961                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9962                 }
9963
9964                 if (!tg3_copper_is_advertising_all(tp)) {
9965                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9966
9967                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9968                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9969
9970                         tg3_writephy(tp, MII_BMCR,
9971                                      BMCR_ANENABLE | BMCR_ANRESTART);
9972                 }
9973                 tg3_phy_set_wirespeed(tp);
9974
9975                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9976                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9977                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9978         }
9979
9980 skip_phy_reset:
9981         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9982                 err = tg3_init_5401phy_dsp(tp);
9983                 if (err)
9984                         return err;
9985         }
9986
9987         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9988                 err = tg3_init_5401phy_dsp(tp);
9989         }
9990
9991         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9992                 tp->link_config.advertising =
9993                         (ADVERTISED_1000baseT_Half |
9994                          ADVERTISED_1000baseT_Full |
9995                          ADVERTISED_Autoneg |
9996                          ADVERTISED_FIBRE);
9997         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9998                 tp->link_config.advertising &=
9999                         ~(ADVERTISED_1000baseT_Half |
10000                           ADVERTISED_1000baseT_Full);
10001
10002         return err;
10003 }
10004
10005 static void __devinit tg3_read_partno(struct tg3 *tp)
10006 {
10007         unsigned char vpd_data[256];
10008         int i;
10009         u32 magic;
10010
10011         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10012                 goto out_not_found;
10013
10014         if (magic == TG3_EEPROM_MAGIC) {
10015                 for (i = 0; i < 256; i += 4) {
10016                         u32 tmp;
10017
10018                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10019                                 goto out_not_found;
10020
10021                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10022                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10023                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10024                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10025                 }
10026         } else {
10027                 int vpd_cap;
10028
10029                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10030                 for (i = 0; i < 256; i += 4) {
10031                         u32 tmp, j = 0;
10032                         u16 tmp16;
10033
10034                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10035                                               i);
10036                         while (j++ < 100) {
10037                                 pci_read_config_word(tp->pdev, vpd_cap +
10038                                                      PCI_VPD_ADDR, &tmp16);
10039                                 if (tmp16 & 0x8000)
10040                                         break;
10041                                 msleep(1);
10042                         }
10043                         if (!(tmp16 & 0x8000))
10044                                 goto out_not_found;
10045
10046                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10047                                               &tmp);
10048                         tmp = cpu_to_le32(tmp);
10049                         memcpy(&vpd_data[i], &tmp, 4);
10050                 }
10051         }
10052
10053         /* Now parse and find the part number. */
10054         for (i = 0; i < 256; ) {
10055                 unsigned char val = vpd_data[i];
10056                 int block_end;
10057
10058                 if (val == 0x82 || val == 0x91) {
10059                         i = (i + 3 +
10060                              (vpd_data[i + 1] +
10061                               (vpd_data[i + 2] << 8)));
10062                         continue;
10063                 }
10064
10065                 if (val != 0x90)
10066                         goto out_not_found;
10067
10068                 block_end = (i + 3 +
10069                              (vpd_data[i + 1] +
10070                               (vpd_data[i + 2] << 8)));
10071                 i += 3;
10072                 while (i < block_end) {
10073                         if (vpd_data[i + 0] == 'P' &&
10074                             vpd_data[i + 1] == 'N') {
10075                                 int partno_len = vpd_data[i + 2];
10076
10077                                 if (partno_len > 24)
10078                                         goto out_not_found;
10079
10080                                 memcpy(tp->board_part_number,
10081                                        &vpd_data[i + 3],
10082                                        partno_len);
10083
10084                                 /* Success. */
10085                                 return;
10086                         }
10087                 }
10088
10089                 /* Part number not found. */
10090                 goto out_not_found;
10091         }
10092
10093 out_not_found:
10094         strcpy(tp->board_part_number, "none");
10095 }
10096
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver.  On any read failure or unexpected NVRAM layout,
 * tp->fw_ver is silently left unchanged.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	/* Only proceed on images carrying the tg3 EEPROM signature. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0x4 appears to hold the image start address and 0xc the
	 * first directory entry offset -- TODO confirm against the
	 * Broadcom NVRAM layout docs.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* Top bits 0x0c presumably tag a bootcode image entry. */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy 16 bytes of version string, keeping the
		 * little-endian byte order as stored in NVRAM.
		 */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10136
10137 static int __devinit tg3_get_invariants(struct tg3 *tp)
10138 {
10139         static struct pci_device_id write_reorder_chipsets[] = {
10140                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10141                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10142                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10143                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10144                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10145                              PCI_DEVICE_ID_VIA_8385_0) },
10146                 { },
10147         };
10148         u32 misc_ctrl_reg;
10149         u32 cacheline_sz_reg;
10150         u32 pci_state_reg, grc_misc_cfg;
10151         u32 val;
10152         u16 pci_cmd;
10153         int err;
10154
10155         /* Force memory write invalidate off.  If we leave it on,
10156          * then on 5700_BX chips we have to enable a workaround.
10157          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10158          * to match the cacheline size.  The Broadcom driver have this
10159          * workaround but turns MWI off all the times so never uses
10160          * it.  This seems to suggest that the workaround is insufficient.
10161          */
10162         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10163         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10164         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10165
10166         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10167          * has the register indirect write enable bit set before
10168          * we try to access any of the MMIO registers.  It is also
10169          * critical that the PCI-X hw workaround situation is decided
10170          * before that as well.
10171          */
10172         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10173                               &misc_ctrl_reg);
10174
10175         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10176                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10177
10178         /* Wrong chip ID in 5752 A0. This code can be removed later
10179          * as A0 is not in production.
10180          */
10181         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10182                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10183
10184         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10185          * we need to disable memory and use config. cycles
10186          * only to access all registers. The 5702/03 chips
10187          * can mistakenly decode the special cycles from the
10188          * ICH chipsets as memory write cycles, causing corruption
10189          * of register and memory space. Only certain ICH bridges
10190          * will drive special cycles with non-zero data during the
10191          * address phase which can fall within the 5703's address
10192          * range. This is not an ICH bug as the PCI spec allows
10193          * non-zero address during special cycles. However, only
10194          * these ICH bridges are known to drive non-zero addresses
10195          * during special cycles.
10196          *
10197          * Since special cycles do not cross PCI bridges, we only
10198          * enable this workaround if the 5703 is on the secondary
10199          * bus of these ICH bridges.
10200          */
10201         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10202             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10203                 static struct tg3_dev_id {
10204                         u32     vendor;
10205                         u32     device;
10206                         u32     rev;
10207                 } ich_chipsets[] = {
10208                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10209                           PCI_ANY_ID },
10210                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10211                           PCI_ANY_ID },
10212                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10213                           0xa },
10214                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10215                           PCI_ANY_ID },
10216                         { },
10217                 };
10218                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10219                 struct pci_dev *bridge = NULL;
10220
10221                 while (pci_id->vendor != 0) {
10222                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10223                                                 bridge);
10224                         if (!bridge) {
10225                                 pci_id++;
10226                                 continue;
10227                         }
10228                         if (pci_id->rev != PCI_ANY_ID) {
10229                                 u8 rev;
10230
10231                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10232                                                      &rev);
10233                                 if (rev > pci_id->rev)
10234                                         continue;
10235                         }
10236                         if (bridge->subordinate &&
10237                             (bridge->subordinate->number ==
10238                              tp->pdev->bus->number)) {
10239
10240                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10241                                 pci_dev_put(bridge);
10242                                 break;
10243                         }
10244                 }
10245         }
10246
10247         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10248          * DMA addresses > 40-bit. This bridge may have other additional
10249          * 57xx devices behind it in some 4-port NIC designs for example.
10250          * Any tg3 device found behind the bridge will also need the 40-bit
10251          * DMA workaround.
10252          */
10253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10254             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10255                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10256                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10257                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10258         }
10259         else {
10260                 struct pci_dev *bridge = NULL;
10261
10262                 do {
10263                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10264                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10265                                                 bridge);
10266                         if (bridge && bridge->subordinate &&
10267                             (bridge->subordinate->number <=
10268                              tp->pdev->bus->number) &&
10269                             (bridge->subordinate->subordinate >=
10270                              tp->pdev->bus->number)) {
10271                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10272                                 pci_dev_put(bridge);
10273                                 break;
10274                         }
10275                 } while (bridge);
10276         }
10277
10278         /* Initialize misc host control in PCI block. */
10279         tp->misc_host_ctrl |= (misc_ctrl_reg &
10280                                MISC_HOST_CTRL_CHIPREV);
10281         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10282                                tp->misc_host_ctrl);
10283
10284         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10285                               &cacheline_sz_reg);
10286
10287         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10288         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10289         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10290         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10291
10292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10293             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10295             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10296             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10297                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10298
10299         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10300             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10301                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10302
10303         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10304                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10305                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10306                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10307                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10308                 } else {
10309                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10310                                           TG3_FLG2_HW_TSO_1_BUG;
10311                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10312                                 ASIC_REV_5750 &&
10313                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10314                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10315                 }
10316         }
10317
10318         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10319             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10320             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10321             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10322             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10323                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10324
10325         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10326                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10327
10328         /* If we have an AMD 762 or VIA K8T800 chipset, write
10329          * reordering to the mailbox registers done by the host
10330          * controller can cause major troubles.  We read back from
10331          * every mailbox register write to force the writes to be
10332          * posted to the chip in order.
10333          */
10334         if (pci_dev_present(write_reorder_chipsets) &&
10335             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10336                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10337
10338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10339             tp->pci_lat_timer < 64) {
10340                 tp->pci_lat_timer = 64;
10341
10342                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10343                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10344                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10345                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10346
10347                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10348                                        cacheline_sz_reg);
10349         }
10350
10351         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10352                               &pci_state_reg);
10353
10354         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10355                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10356
10357                 /* If this is a 5700 BX chipset, and we are in PCI-X
10358                  * mode, enable register write workaround.
10359                  *
10360                  * The workaround is to use indirect register accesses
10361                  * for all chip writes not to mailbox registers.
10362                  */
10363                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10364                         u32 pm_reg;
10365                         u16 pci_cmd;
10366
10367                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10368
10369                         /* The chip can have it's power management PCI config
10370                          * space registers clobbered due to this bug.
10371                          * So explicitly force the chip into D0 here.
10372                          */
10373                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10374                                               &pm_reg);
10375                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10376                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10377                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10378                                                pm_reg);
10379
10380                         /* Also, force SERR#/PERR# in PCI command. */
10381                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10382                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10383                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10384                 }
10385         }
10386
10387         /* 5700 BX chips need to have their TX producer index mailboxes
10388          * written twice to workaround a bug.
10389          */
10390         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10391                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10392
10393         /* Back to back register writes can cause problems on this chip,
10394          * the workaround is to read back all reg writes except those to
10395          * mailbox regs.  See tg3_write_indirect_reg32().
10396          *
10397          * PCI Express 5750_A0 rev chips need this workaround too.
10398          */
10399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10400             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10401              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10402                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10403
10404         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10405                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10406         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10407                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10408
10409         /* Chip-specific fixup from Broadcom driver */
10410         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10411             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10412                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10413                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10414         }
10415
10416         /* Default fast path register access methods */
10417         tp->read32 = tg3_read32;
10418         tp->write32 = tg3_write32;
10419         tp->read32_mbox = tg3_read32;
10420         tp->write32_mbox = tg3_write32;
10421         tp->write32_tx_mbox = tg3_write32;
10422         tp->write32_rx_mbox = tg3_write32;
10423
10424         /* Various workaround register access methods */
10425         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10426                 tp->write32 = tg3_write_indirect_reg32;
10427         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10428                 tp->write32 = tg3_write_flush_reg32;
10429
10430         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10431             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10432                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10433                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10434                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10435         }
10436
10437         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10438                 tp->read32 = tg3_read_indirect_reg32;
10439                 tp->write32 = tg3_write_indirect_reg32;
10440                 tp->read32_mbox = tg3_read_indirect_mbox;
10441                 tp->write32_mbox = tg3_write_indirect_mbox;
10442                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10443                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10444
10445                 iounmap(tp->regs);
10446                 tp->regs = NULL;
10447
10448                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10449                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10450                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10451         }
10452
10453         if (tp->write32 == tg3_write_indirect_reg32 ||
10454             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10455              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10456               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10457                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10458
10459         /* Get eeprom hw config before calling tg3_set_power_state().
10460          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10461          * determined before calling tg3_set_power_state() so that
10462          * we know whether or not to switch out of Vaux power.
10463          * When the flag is set, it means that GPIO1 is used for eeprom
10464          * write protect and also implies that it is a LOM where GPIOs
10465          * are not used to switch power.
10466          */ 
10467         tg3_get_eeprom_hw_cfg(tp);
10468
10469         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10470          * GPIO1 driven high will bring 5700's external PHY out of reset.
10471          * It is also used as eeprom write protect on LOMs.
10472          */
10473         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10474         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10475             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10476                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10477                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10478         /* Unused GPIO3 must be driven as output on 5752 because there
10479          * are no pull-up resistors on unused GPIO pins.
10480          */
10481         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10482                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10483
10484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10485                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10486
10487         /* Force the chip into D0. */
10488         err = tg3_set_power_state(tp, PCI_D0);
10489         if (err) {
10490                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10491                        pci_name(tp->pdev));
10492                 return err;
10493         }
10494
10495         /* 5700 B0 chips do not support checksumming correctly due
10496          * to hardware bugs.
10497          */
10498         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10499                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10500
10501         /* Derive initial jumbo mode from MTU assigned in
10502          * ether_setup() via the alloc_etherdev() call
10503          */
10504         if (tp->dev->mtu > ETH_DATA_LEN &&
10505             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10506                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10507
10508         /* Determine WakeOnLan speed to use. */
10509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10510             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10511             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10512             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10513                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10514         } else {
10515                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10516         }
10517
10518         /* A few boards don't want Ethernet@WireSpeed phy feature */
10519         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10520             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10521              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10522              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10523             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10524                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10525
10526         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10527             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10528                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10529         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10530                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10531
10532         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10533                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10534                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10535                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10536                 else
10537                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10538         }
10539
10540         tp->coalesce_mode = 0;
10541         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10542             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10543                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10544
10545         /* Initialize MAC MI mode, polling disabled. */
10546         tw32_f(MAC_MI_MODE, tp->mi_mode);
10547         udelay(80);
10548
10549         /* Initialize data/descriptor byte/word swapping. */
10550         val = tr32(GRC_MODE);
10551         val &= GRC_MODE_HOST_STACKUP;
10552         tw32(GRC_MODE, val | tp->grc_mode);
10553
10554         tg3_switch_clocks(tp);
10555
10556         /* Clear this out for sanity. */
10557         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10558
10559         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10560                               &pci_state_reg);
10561         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10562             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10563                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10564
10565                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10566                     chiprevid == CHIPREV_ID_5701_B0 ||
10567                     chiprevid == CHIPREV_ID_5701_B2 ||
10568                     chiprevid == CHIPREV_ID_5701_B5) {
10569                         void __iomem *sram_base;
10570
10571                         /* Write some dummy words into the SRAM status block
10572                          * area, see if it reads back correctly.  If the return
10573                          * value is bad, force enable the PCIX workaround.
10574                          */
10575                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10576
10577                         writel(0x00000000, sram_base);
10578                         writel(0x00000000, sram_base + 4);
10579                         writel(0xffffffff, sram_base + 4);
10580                         if (readl(sram_base) != 0x00000000)
10581                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10582                 }
10583         }
10584
10585         udelay(50);
10586         tg3_nvram_init(tp);
10587
10588         grc_misc_cfg = tr32(GRC_MISC_CFG);
10589         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10590
10591         /* Broadcom's driver says that CIOBE multisplit has a bug */
10592 #if 0
10593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10594             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10595                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10596                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10597         }
10598 #endif
10599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10600             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10601              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10602                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10603
10604         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10605             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10606                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10607         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10608                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10609                                       HOSTCC_MODE_CLRTICK_TXBD);
10610
10611                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10612                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10613                                        tp->misc_host_ctrl);
10614         }
10615
10616         /* these are limited to 10/100 only */
10617         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10618              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10619             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10620              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10621              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10622               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10623               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10624             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10625              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10626               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10627                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10628
10629         err = tg3_phy_probe(tp);
10630         if (err) {
10631                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10632                        pci_name(tp->pdev), err);
10633                 /* ... but do not return immediately ... */
10634         }
10635
10636         tg3_read_partno(tp);
10637         tg3_read_fw_ver(tp);
10638
10639         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10640                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10641         } else {
10642                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10643                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10644                 else
10645                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10646         }
10647
10648         /* 5700 {AX,BX} chips have a broken status block link
10649          * change bit implementation, so we must use the
10650          * status register in those cases.
10651          */
10652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10653                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10654         else
10655                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10656
10657         /* The led_ctrl is set during tg3_phy_probe, here we might
10658          * have to force the link status polling mechanism based
10659          * upon subsystem IDs.
10660          */
10661         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10662             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10663                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10664                                   TG3_FLAG_USE_LINKCHG_REG);
10665         }
10666
10667         /* For all SERDES we poll the MAC status register. */
10668         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10669                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10670         else
10671                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10672
10673         /* All chips before 5787 can get confused if TX buffers
10674          * straddle the 4GB address boundary in some cases.
10675          */
10676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10677             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10678                 tp->dev->hard_start_xmit = tg3_start_xmit;
10679         else
10680                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10681
10682         tp->rx_offset = 2;
10683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10684             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10685                 tp->rx_offset = 0;
10686
10687         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10688
10689         /* Increment the rx prod index on the rx std ring by at most
10690          * 8 for these chips to workaround hw errata.
10691          */
10692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10693             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10695                 tp->rx_std_max_post = 8;
10696
10697         /* By default, disable wake-on-lan.  User can change this
10698          * using ETHTOOL_SWOL.
10699          */
10700         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10701
10702         return err;
10703 }
10704
10705 #ifdef CONFIG_SPARC64
10706 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10707 {
10708         struct net_device *dev = tp->dev;
10709         struct pci_dev *pdev = tp->pdev;
10710         struct pcidev_cookie *pcp = pdev->sysdata;
10711
10712         if (pcp != NULL) {
10713                 unsigned char *addr;
10714                 int len;
10715
10716                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10717                                         &len);
10718                 if (addr && len == 6) {
10719                         memcpy(dev->dev_addr, addr, 6);
10720                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10721                         return 0;
10722                 }
10723         }
10724         return -ENODEV;
10725 }
10726
10727 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10728 {
10729         struct net_device *dev = tp->dev;
10730
10731         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10732         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10733         return 0;
10734 }
10735 #endif
10736
/* Determine the device's MAC address.
 *
 * Sources are tried in priority order:
 *   1. SPARC OpenFirmware "local-mac-address" property (CONFIG_SPARC64 only),
 *   2. the NIC SRAM mailbox where bootcode publishes the address,
 *   3. NVRAM at mac_offset,
 *   4. the MAC_ADDR_0_{HIGH,LOW} hardware registers.
 *
 * Returns 0 on success with dev->dev_addr and dev->perm_addr filled in,
 * or -EINVAL if no valid ethernet address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC64
	/* Prefer the firmware-provided address when available. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC chips: the second MAC's address lives at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be obtained, reset
		 * the NVRAM command engine; otherwise just release the lock
		 * we acquired.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b in the upper half marks a valid bootcode-written MAC
	 * address in SRAM (presumably ASCII "HK" -- TODO confirm against
	 * Broadcom bootcode docs).
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte order differs from the
		 * SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	/* Reject all-zero/multicast results; on sparc64 fall back to the
	 * IDPROM address before giving up.
	 */
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	/* Whatever source won becomes the permanent address too. */
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
10809
10810 #define BOUNDARY_SINGLE_CACHELINE       1
10811 #define BOUNDARY_MULTI_CACHELINE        2
10812
10813 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10814 {
10815         int cacheline_size;
10816         u8 byte;
10817         int goal;
10818
10819         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10820         if (byte == 0)
10821                 cacheline_size = 1024;
10822         else
10823                 cacheline_size = (int) byte * 4;
10824
10825         /* On 5703 and later chips, the boundary bits have no
10826          * effect.
10827          */
10828         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10829             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10830             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10831                 goto out;
10832
10833 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10834         goal = BOUNDARY_MULTI_CACHELINE;
10835 #else
10836 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10837         goal = BOUNDARY_SINGLE_CACHELINE;
10838 #else
10839         goal = 0;
10840 #endif
10841 #endif
10842
10843         if (!goal)
10844                 goto out;
10845
10846         /* PCI controllers on most RISC systems tend to disconnect
10847          * when a device tries to burst across a cache-line boundary.
10848          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10849          *
10850          * Unfortunately, for PCI-E there are only limited
10851          * write-side controls for this, and thus for reads
10852          * we will still get the disconnects.  We'll also waste
10853          * these PCI cycles for both read and write for chips
10854          * other than 5700 and 5701 which do not implement the
10855          * boundary bits.
10856          */
10857         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10858             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10859                 switch (cacheline_size) {
10860                 case 16:
10861                 case 32:
10862                 case 64:
10863                 case 128:
10864                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10865                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10866                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10867                         } else {
10868                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10869                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10870                         }
10871                         break;
10872
10873                 case 256:
10874                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10875                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10876                         break;
10877
10878                 default:
10879                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10880                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10881                         break;
10882                 };
10883         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10884                 switch (cacheline_size) {
10885                 case 16:
10886                 case 32:
10887                 case 64:
10888                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10889                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10890                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10891                                 break;
10892                         }
10893                         /* fallthrough */
10894                 case 128:
10895                 default:
10896                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10897                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10898                         break;
10899                 };
10900         } else {
10901                 switch (cacheline_size) {
10902                 case 16:
10903                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10904                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10905                                         DMA_RWCTRL_WRITE_BNDRY_16);
10906                                 break;
10907                         }
10908                         /* fallthrough */
10909                 case 32:
10910                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10911                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10912                                         DMA_RWCTRL_WRITE_BNDRY_32);
10913                                 break;
10914                         }
10915                         /* fallthrough */
10916                 case 64:
10917                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10918                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10919                                         DMA_RWCTRL_WRITE_BNDRY_64);
10920                                 break;
10921                         }
10922                         /* fallthrough */
10923                 case 128:
10924                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10925                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10926                                         DMA_RWCTRL_WRITE_BNDRY_128);
10927                                 break;
10928                         }
10929                         /* fallthrough */
10930                 case 256:
10931                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10932                                 DMA_RWCTRL_WRITE_BNDRY_256);
10933                         break;
10934                 case 512:
10935                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10936                                 DMA_RWCTRL_WRITE_BNDRY_512);
10937                         break;
10938                 case 1024:
10939                 default:
10940                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10941                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10942                         break;
10943                 };
10944         }
10945
10946 out:
10947         return val;
10948 }
10949
/* Run one host<->NIC DMA transaction of @size bytes over the scratch
 * buffer at @buf / @buf_dma.  @to_device selects direction: non-zero
 * means host memory -> chip (read DMA engine), zero means chip -> host
 * (write DMA engine).  A buffer descriptor is built in on-chip SRAM
 * through the PCI memory window and queued on the matching high-priority
 * DMA FIFO; completion is then polled for up to 40 * 100us = 4ms.
 * Returns 0 when the completion FIFO reports our descriptor, -ENODEV
 * on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce before the test: clear both completion FIFOs and the
	 * read/write DMA engine status registers.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	/* Reset the buffer manager and flow-through queues. */
	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Host-side buffer address, split into the descriptor's hi/lo
	 * words; 0x2100 is the on-chip mbuf address used as the other
	 * end of the transfer (the #if 0 readback below looks there).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion-queue / submission-queue ids for the read
		 * DMA path, then enable the read DMA engine.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Same for the write DMA path. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	/* Descriptor flags; magic value — meaning not visible here. */
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window (base address register selects
	 * the SRAM word, data register carries the value).
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor's SRAM address
	 * on the appropriate high-priority DMA FIFO.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for our descriptor address;
	 * time out after 40 iterations of 100us.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11030
11031 #define TEST_BUFFER_SIZE        0x2000
11032
/* Choose safe TG3PCI_DMA_RW_CTRL settings for this chip/bus combination
 * and, on 5700/5701, verify them with an actual write/read DMA round
 * trip through a TEST_BUFFER_SIZE scratch buffer (to expose the
 * 5700/5701 write-DMA boundary bug).  On success tp->dma_rwctrl holds
 * the final value already programmed into the chip.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	/* Coherent scratch buffer used for the DMA round trip. */
	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes, then the cacheline-derived
	 * boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-type specific watermark/workaround bits.  The magic
	 * constants below are per-chip register field values; their
	 * individual field meanings are not visible here.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble regardless of the above. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the write-DMA bug verified by an actual
	 * transfer; all other chips are done here.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Round-trip loop: write the pattern to the chip, read it back,
	 * verify.  On corruption, retry once with the write boundary
	 * forced to 16 bytes; if that is already set, give up.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: fall back to a 16-byte
				 * write boundary and retry the whole loop.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11213
11214 static void __devinit tg3_init_link_config(struct tg3 *tp)
11215 {
11216         tp->link_config.advertising =
11217                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11218                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11219                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11220                  ADVERTISED_Autoneg | ADVERTISED_MII);
11221         tp->link_config.speed = SPEED_INVALID;
11222         tp->link_config.duplex = DUPLEX_INVALID;
11223         tp->link_config.autoneg = AUTONEG_ENABLE;
11224         tp->link_config.active_speed = SPEED_INVALID;
11225         tp->link_config.active_duplex = DUPLEX_INVALID;
11226         tp->link_config.phy_is_low_power = 0;
11227         tp->link_config.orig_speed = SPEED_INVALID;
11228         tp->link_config.orig_duplex = DUPLEX_INVALID;
11229         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11230 }
11231
11232 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11233 {
11234         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11235                 tp->bufmgr_config.mbuf_read_dma_low_water =
11236                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11237                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11238                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11239                 tp->bufmgr_config.mbuf_high_water =
11240                         DEFAULT_MB_HIGH_WATER_5705;
11241
11242                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11243                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11244                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11245                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11246                 tp->bufmgr_config.mbuf_high_water_jumbo =
11247                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11248         } else {
11249                 tp->bufmgr_config.mbuf_read_dma_low_water =
11250                         DEFAULT_MB_RDMA_LOW_WATER;
11251                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11252                         DEFAULT_MB_MACRX_LOW_WATER;
11253                 tp->bufmgr_config.mbuf_high_water =
11254                         DEFAULT_MB_HIGH_WATER;
11255
11256                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11257                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11258                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11259                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11260                 tp->bufmgr_config.mbuf_high_water_jumbo =
11261                         DEFAULT_MB_HIGH_WATER_JUMBO;
11262         }
11263
11264         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11265         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11266 }
11267
11268 static char * __devinit tg3_phy_string(struct tg3 *tp)
11269 {
11270         switch (tp->phy_id & PHY_ID_MASK) {
11271         case PHY_ID_BCM5400:    return "5400";
11272         case PHY_ID_BCM5401:    return "5401";
11273         case PHY_ID_BCM5411:    return "5411";
11274         case PHY_ID_BCM5701:    return "5701";
11275         case PHY_ID_BCM5703:    return "5703";
11276         case PHY_ID_BCM5704:    return "5704";
11277         case PHY_ID_BCM5705:    return "5705";
11278         case PHY_ID_BCM5750:    return "5750";
11279         case PHY_ID_BCM5752:    return "5752";
11280         case PHY_ID_BCM5714:    return "5714";
11281         case PHY_ID_BCM5780:    return "5780";
11282         case PHY_ID_BCM5755:    return "5755";
11283         case PHY_ID_BCM5787:    return "5787";
11284         case PHY_ID_BCM8002:    return "8002/serdes";
11285         case 0:                 return "serdes";
11286         default:                return "unknown";
11287         };
11288 }
11289
11290 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11291 {
11292         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11293                 strcpy(str, "PCI Express");
11294                 return str;
11295         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11296                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11297
11298                 strcpy(str, "PCIX:");
11299
11300                 if ((clock_ctrl == 7) ||
11301                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11302                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11303                         strcat(str, "133MHz");
11304                 else if (clock_ctrl == 0)
11305                         strcat(str, "33MHz");
11306                 else if (clock_ctrl == 2)
11307                         strcat(str, "50MHz");
11308                 else if (clock_ctrl == 4)
11309                         strcat(str, "66MHz");
11310                 else if (clock_ctrl == 6)
11311                         strcat(str, "100MHz");
11312         } else {
11313                 strcpy(str, "PCI:");
11314                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11315                         strcat(str, "66MHz");
11316                 else
11317                         strcat(str, "33MHz");
11318         }
11319         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11320                 strcat(str, ":32-bit");
11321         else
11322                 strcat(str, ":64-bit");
11323         return str;
11324 }
11325
/* Find the sibling PCI function of a dual-port device by scanning all
 * eight functions in our own slot for a device other than tp->pdev.
 * Falls back to tp->pdev itself when no peer exists (single-port
 * configuration).  The returned pointer is NOT reference-counted; see
 * the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Not the peer: drop the reference (no-op for NULL). */
		pci_dev_put(peer);
	}
	/* NOTE(review): if the loop exhausts without a break and the
	 * last pci_get_slot() returned a device (e.g. tp->pdev itself
	 * at function 7), that reference was already dropped inside the
	 * loop and is dropped again below — looks like one put too
	 * many; verify against pci_get_slot refcount rules.
	 */
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11353
11354 static void __devinit tg3_init_coal(struct tg3 *tp)
11355 {
11356         struct ethtool_coalesce *ec = &tp->coal;
11357
11358         memset(ec, 0, sizeof(*ec));
11359         ec->cmd = ETHTOOL_GCOALESCE;
11360         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11361         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11362         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11363         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11364         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11365         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11366         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11367         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11368         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11369
11370         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11371                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11372                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11373                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11374                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11375                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11376         }
11377
11378         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11379                 ec->rx_coalesce_usecs_irq = 0;
11380                 ec->tx_coalesce_usecs_irq = 0;
11381                 ec->stats_block_coalesce_usecs = 0;
11382         }
11383 }
11384
/* PCI probe entry point: bring up one Tigon3 device.  Enables and maps
 * the device, allocates and initializes the net_device / tg3 private
 * state, reads chip invariants, configures DMA masks, runs the DMA
 * self-test and finally registers the netdev.  Returns 0 on success or
 * a negative errno, unwinding everything acquired so far via the
 * err_out_* labels at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first probe only. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped region. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the private state embedded in the netdev. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	/* The module parameter tg3_debug overrides the default mask. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	/* Map the register BAR. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	/* Default ring sizes. */
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the (pre-net_device_ops era) netdev callbacks. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision / flags; must happen before the DMA mask
	 * selection below, which depends on tg3_flags.
	 */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was rejected. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: hardware TSO always qualifies; the
	 * listed older chips (and ASF-enabled parts) are excluded.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705 A1 without TSO on a slow bus: cap the rx ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips: locate the sibling PCI function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Log a one-time summary of the probed device. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	netif_carrier_off(tp->dev);

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11711
11712 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11713 {
11714         struct net_device *dev = pci_get_drvdata(pdev);
11715
11716         if (dev) {
11717                 struct tg3 *tp = netdev_priv(dev);
11718
11719                 flush_scheduled_work();
11720                 unregister_netdev(dev);
11721                 if (tp->regs) {
11722                         iounmap(tp->regs);
11723                         tp->regs = NULL;
11724                 }
11725                 free_netdev(dev);
11726                 pci_release_regions(pdev);
11727                 pci_disable_device(pdev);
11728                 pci_set_drvdata(pdev, NULL);
11729         }
11730 }
11731
/* PCI suspend hook: quiesce the interface, halt the chip, and move it
 * to the low-power state selected by the PM core.  On a failed power
 * transition the hardware is restarted so the interface stays usable,
 * and the error is propagated to the caller.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* An interface that is down has nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Make sure a scheduled reset task is not still running. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* NOTE(review): second argument 1 presumably synchronizes with
	 * in-flight interrupt handlers before disabling interrupts —
	 * confirm against tg3_full_lock().
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: restart the hardware so the
		 * device keeps working, then report the original error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
11777
11778 static int tg3_resume(struct pci_dev *pdev)
11779 {
11780         struct net_device *dev = pci_get_drvdata(pdev);
11781         struct tg3 *tp = netdev_priv(dev);
11782         int err;
11783
11784         if (!netif_running(dev))
11785                 return 0;
11786
11787         pci_restore_state(tp->pdev);
11788
11789         err = tg3_set_power_state(tp, PCI_D0);
11790         if (err)
11791                 return err;
11792
11793         netif_device_attach(dev);
11794
11795         tg3_full_lock(tp, 0);
11796
11797         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11798         err = tg3_restart_hw(tp, 1);
11799         if (err)
11800                 goto out;
11801
11802         tp->timer.expires = jiffies + tp->timer_offset;
11803         add_timer(&tp->timer);
11804
11805         tg3_netif_start(tp);
11806
11807 out:
11808         tg3_full_unlock(tp);
11809
11810         return err;
11811 }
11812
/* PCI driver glue: ties the probe/remove and power-management entry
 * points to the device IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11821
11822 static int __init tg3_init(void)
11823 {
11824         return pci_module_init(&tg3_driver);
11825 }
11826
/* Module exit point: unregister the PCI driver, which detaches and
 * cleans up every bound device via tg3_remove_one().
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11831
/* Hook the init/exit routines into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);