back ported version 3.66f tg3 driver
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19 #include <linux/version.h>
20
21 #if (LINUX_VERSION_CODE < 0x020500)
22 #if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
23 #define MODVERSIONS
24 #include <linux/modversions.h>
25 #endif
26 #endif
27 #include <linux/module.h>
28 #if (LINUX_VERSION_CODE >= 0x20600)
29 #include <linux/moduleparam.h>
30 #endif
31 #include <linux/kernel.h>
32 #include <linux/types.h>
33 #include <linux/compiler.h>
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/in.h>
37 #include <linux/init.h>
38 #include <linux/ioport.h>
39 #include <linux/pci.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/skbuff.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
46 #include <linux/ip.h>
47 #include <linux/tcp.h>
48 #if (LINUX_VERSION_CODE >= 0x20600)
49 #include <linux/workqueue.h>
50 #endif
51 #include <linux/prefetch.h>
52 #if (LINUX_VERSION_CODE >= 0x020600)
53 #include <linux/dma-mapping.h>
54 #endif
55 #include <linux/bitops.h>
56
57 #include <net/checksum.h>
58
59 #include <asm/system.h>
60 #include <asm/io.h>
61 #include <asm/byteorder.h>
62 #include <asm/uaccess.h>
63
64 #ifdef CONFIG_SPARC64
65 #include <asm/idprom.h>
66 #include <asm/oplib.h>
67 #include <asm/pbm.h>
68 #endif
69
70 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
71 #define TG3_VLAN_TAG_USED 1
72 #else
73 #define TG3_VLAN_TAG_USED 0
74 #endif
75
76 #ifdef NETIF_F_TSO
77 #define TG3_TSO_SUPPORT 1
78 #else
79 #define TG3_TSO_SUPPORT 0
80 #endif
81
82 #include "tg3.h"
83
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.66f"
#define DRV_MODULE_RELDATE      "September 1, 2006"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap used when the tg3_debug module param is -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Host-visible descriptor ring sizes in bytes. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* NOTE: both macros expand a local `tp` pointer; only usable in scopes
 * where a `struct tg3 *tp` is visible.
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
153
/* Banner string printed once at driver load/probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
#if (LINUX_VERSION_CODE >= 0x20600)
/* module_param only exists on 2.6+ kernels; 2.4 builds keep the default. */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#endif
167
/* PCI IDs claimed by this driver.  All entries match any subsystem
 * vendor/device; the terminating all-zero entry is required by
 * MODULE_DEVICE_TABLE/pci_register_driver.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }          /* terminator */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
283
/* Names reported for ETHTOOL_GSTRINGS, in the exact order the u64
 * counters appear in struct tg3_ethtool_stats.  Do NOT reorder:
 * ethtool output relies on index correspondence with that struct.
 */
static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
364
/* Labels for the ETHTOOL_TEST self-tests; order must match the result
 * array filled in by the driver's self-test implementation.
 */
static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
375
376 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
377 {
378         writel(val, tp->regs + off);
379 }
380
381 static u32 tg3_read32(struct tg3 *tp, u32 off)
382 {
383         return (readl(tp->regs + off)); 
384 }
385
386 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
387 {
388         unsigned long flags;
389
390         spin_lock_irqsave(&tp->indirect_lock, flags);
391         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
392         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
393         spin_unlock_irqrestore(&tp->indirect_lock, flags);
394 }
395
396 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
397 {
398         writel(val, tp->regs + off);
399         readl(tp->regs + off);
400 }
401
402 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
403 {
404         unsigned long flags;
405         u32 val;
406
407         spin_lock_irqsave(&tp->indirect_lock, flags);
408         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
409         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
410         spin_unlock_irqrestore(&tp->indirect_lock, flags);
411         return val;
412 }
413
414 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
415 {
416         unsigned long flags;
417
418         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
419                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
420                                        TG3_64BIT_REG_LOW, val);
421                 return;
422         }
423         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
424                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
425                                        TG3_64BIT_REG_LOW, val);
426                 return;
427         }
428
429         spin_lock_irqsave(&tp->indirect_lock, flags);
430         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
431         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
432         spin_unlock_irqrestore(&tp->indirect_lock, flags);
433
434         /* In indirect mode when disabling interrupts, we also need
435          * to clear the interrupt bit in the GRC local ctrl register.
436          */
437         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
438             (val == 0x1)) {
439                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
440                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
441         }
442 }
443
444 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
445 {
446         unsigned long flags;
447         u32 val;
448
449         spin_lock_irqsave(&tp->indirect_lock, flags);
450         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
451         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
452         spin_unlock_irqrestore(&tp->indirect_lock, flags);
453         return val;
454 }
455
456 /* usec_wait specifies the wait time in usec when writing to certain registers
457  * where it is unsafe to read back the register without some delay.
458  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
459  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
460  */
461 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
462 {
463         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
464             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
465                 /* Non-posted methods */
466                 tp->write32(tp, off, val);
467         else {
468                 /* Posted method */
469                 tg3_write32(tp, off, val);
470                 if (usec_wait)
471                         udelay(usec_wait);
472                 tp->read32(tp, off);
473         }
474         /* Wait again after the read for the posted method to guarantee that
475          * the wait time is met.
476          */
477         if (usec_wait)
478                 udelay(usec_wait);
479 }
480
481 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
482 {
483         tp->write32_mbox(tp, off, val);
484         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
485             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
486                 tp->read32_mbox(tp, off);
487 }
488
489 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
490 {
491         void __iomem *mbox = tp->regs + off;
492         writel(val, mbox);
493         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
494                 writel(val, mbox);
495         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
496                 readl(mbox);
497 }
498
/* Shorthand register accessors.  All of these expand a local `tp`
 * pointer, so they are only usable where a `struct tg3 *tp` is in scope.
 * The _f variants flush the write; tw32_wait_f also waits `us` usecs.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
509
510 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
511 {
512         unsigned long flags;
513
514         spin_lock_irqsave(&tp->indirect_lock, flags);
515         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
516                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
517                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
518
519                 /* Always leave this as zero. */
520                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
521         } else {
522                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
523                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
524
525                 /* Always leave this as zero. */
526                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
527         }
528         spin_unlock_irqrestore(&tp->indirect_lock, flags);
529 }
530
531 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
532 {
533         unsigned long flags;
534
535         spin_lock_irqsave(&tp->indirect_lock, flags);
536         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
537                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
538                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
539
540                 /* Always leave this as zero. */
541                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
542         } else {
543                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
544                 *val = tr32(TG3PCI_MEM_WIN_DATA);
545
546                 /* Always leave this as zero. */
547                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
548         }
549         spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 }
551
552 static void tg3_disable_ints(struct tg3 *tp)
553 {
554         tw32(TG3PCI_MISC_HOST_CTRL,
555              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
556         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
557 }
558
559 static inline void tg3_cond_int(struct tg3 *tp)
560 {
561         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
562             (tp->hw_status->status & SD_STATUS_UPDATED))
563                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
564         else
565                 tw32(HOSTCC_MODE, tp->coalesce_mode |
566                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
567 }
568
569 static void tg3_enable_ints(struct tg3 *tp)
570 {
571         tp->irq_sync = 0;
572         wmb();
573
574         tw32(TG3PCI_MISC_HOST_CTRL,
575              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
576         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
577                        (tp->last_tag << 24));
578         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
579                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
580                                (tp->last_tag << 24));
581         tg3_cond_int(tp);
582 }
583
584 static inline unsigned int tg3_has_work(struct tg3 *tp)
585 {
586         struct tg3_hw_status *sblk = tp->hw_status;
587         unsigned int work_exists = 0;
588
589         /* check for phy events */
590         if (!(tp->tg3_flags &
591               (TG3_FLAG_USE_LINKCHG_REG |
592                TG3_FLAG_POLL_SERDES))) {
593                 if (sblk->status & SD_STATUS_LINK_CHG)
594                         work_exists = 1;
595         }
596         /* check for RX/TX work to do */
597         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
598             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
599                 work_exists = 1;
600
601         return work_exists;
602 }
603
604 /* tg3_restart_ints
605  *  similar to tg3_enable_ints, but it accurately determines whether there
606  *  is new work pending and can return without flushing the PIO write
607  *  which reenables interrupts 
608  */
609 static void tg3_restart_ints(struct tg3 *tp)
610 {
611         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
612                      tp->last_tag << 24);
613         mmiowb();
614
615         /* When doing tagged status, this work check is unnecessary.
616          * The last_tag we write above tells the chip which piece of
617          * work we've completed.
618          */
619         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
620             tg3_has_work(tp))
621                 tw32(HOSTCC_MODE, tp->coalesce_mode |
622                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
623 }
624
625 static inline void tg3_netif_stop(struct tg3 *tp)
626 {
627         tp->dev->trans_start = jiffies; /* prevent tx timeout */
628         netif_poll_disable(tp->dev);
629         netif_tx_disable(tp->dev);
630 }
631
632 static inline void tg3_netif_start(struct tg3 *tp)
633 {
634         netif_wake_queue(tp->dev);
635         /* NOTE: unconditional netif_wake_queue is only appropriate
636          * so long as all callers are assured to have free tx slots
637          * (such as after tg3_init_hw)
638          */
639         netif_poll_enable(tp->dev);
640         tp->hw_status->status |= SD_STATUS_UPDATED;
641         tg3_enable_ints(tp);
642 }
643
644 static void tg3_switch_clocks(struct tg3 *tp)
645 {
646         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
647         u32 orig_clock_ctrl;
648
649         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
650                 return;
651
652         orig_clock_ctrl = clock_ctrl;
653         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
654                        CLOCK_CTRL_CLKRUN_OENABLE |
655                        0x1f);
656         tp->pci_clock_ctrl = clock_ctrl;
657
658         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
659                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
660                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
661                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
662                 }
663         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
664                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
665                             clock_ctrl |
666                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
667                             40);
668                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
669                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
670                             40);
671         }
672         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
673 }
674
675 #define PHY_BUSY_LOOPS  5000
676
677 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
678 {
679         u32 frame_val;
680         unsigned int loops;
681         int ret;
682
683         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
684                 tw32_f(MAC_MI_MODE,
685                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
686                 udelay(80);
687         }
688
689         *val = 0x0;
690
691         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
692                       MI_COM_PHY_ADDR_MASK);
693         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
694                       MI_COM_REG_ADDR_MASK);
695         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
696         
697         tw32_f(MAC_MI_COM, frame_val);
698
699         loops = PHY_BUSY_LOOPS;
700         while (loops != 0) {
701                 udelay(10);
702                 frame_val = tr32(MAC_MI_COM);
703
704                 if ((frame_val & MI_COM_BUSY) == 0) {
705                         udelay(5);
706                         frame_val = tr32(MAC_MI_COM);
707                         break;
708                 }
709                 loops -= 1;
710         }
711
712         ret = -EBUSY;
713         if (loops != 0) {
714                 *val = frame_val & MI_COM_DATA_MASK;
715                 ret = 0;
716         }
717
718         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
719                 tw32_f(MAC_MI_MODE, tp->mi_mode);
720                 udelay(80);
721         }
722
723         return ret;
724 }
725
726 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
727 {
728         u32 frame_val;
729         unsigned int loops;
730         int ret;
731
732         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
733                 tw32_f(MAC_MI_MODE,
734                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
735                 udelay(80);
736         }
737
738         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
739                       MI_COM_PHY_ADDR_MASK);
740         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
741                       MI_COM_REG_ADDR_MASK);
742         frame_val |= (val & MI_COM_DATA_MASK);
743         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
744         
745         tw32_f(MAC_MI_COM, frame_val);
746
747         loops = PHY_BUSY_LOOPS;
748         while (loops != 0) {
749                 udelay(10);
750                 frame_val = tr32(MAC_MI_COM);
751                 if ((frame_val & MI_COM_BUSY) == 0) {
752                         udelay(5);
753                         frame_val = tr32(MAC_MI_COM);
754                         break;
755                 }
756                 loops -= 1;
757         }
758
759         ret = -EBUSY;
760         if (loops != 0)
761                 ret = 0;
762
763         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
764                 tw32_f(MAC_MI_MODE, tp->mi_mode);
765                 udelay(80);
766         }
767
768         return ret;
769 }
770
771 static void tg3_phy_set_wirespeed(struct tg3 *tp)
772 {
773         u32 val;
774
775         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
776                 return;
777
778         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
779             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
780                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
781                              (val | (1 << 15) | (1 << 4)));
782 }
783
784 static int tg3_bmcr_reset(struct tg3 *tp)
785 {
786         u32 phy_control;
787         int limit, err;
788
789         /* OK, reset it, and poll the BMCR_RESET bit until it
790          * clears or we time out.
791          */
792         phy_control = BMCR_RESET;
793         err = tg3_writephy(tp, MII_BMCR, phy_control);
794         if (err != 0)
795                 return -EBUSY;
796
797         limit = 5000;
798         while (limit--) {
799                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
800                 if (err != 0)
801                         return -EBUSY;
802
803                 if ((phy_control & BMCR_RESET) == 0) {
804                         udelay(40);
805                         break;
806                 }
807                 udelay(10);
808         }
809         if (limit <= 0)
810                 return -EBUSY;
811
812         return 0;
813 }
814
815 static int tg3_wait_macro_done(struct tg3 *tp)
816 {
817         int limit = 100;
818
819         while (limit--) {
820                 u32 tmp32;
821
822                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
823                         if ((tmp32 & 0x1000) == 0)
824                                 break;
825                 }
826         }
827         if (limit <= 0)
828                 return -EBUSY;
829
830         return 0;
831 }
832
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back through the macro interface, and verify every word.
 * Part of the 5703/5704/5705 PHY reset workaround.
 *
 * Returns 0 when all four channels verify.  Returns -EBUSY on a macro
 * completion timeout (in which case *resetp is set to 1 so the caller
 * BMCR-resets the PHY before retrying) or on a readback mismatch
 * (*resetp left unchanged).
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six words per channel; readback compares them as (low 15-bit,
	 * high 4-bit) pairs, see the masks below.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and load the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and kick off the readback
		 * macro sequence (0x0082 then 0x0802 on register 0x16,
		 * waiting for completion after each).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only the low 15 bits of even words and low 4
			 * bits of odd words are significant.
			 */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write a fixed DSP sequence
				 * before failing (no PHY reset is
				 * requested for this case).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
898
899 static int tg3_phy_reset_chanpat(struct tg3 *tp)
900 {
901         int chan;
902
903         for (chan = 0; chan < 4; chan++) {
904                 int i;
905
906                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
907                              (chan * 0x2000) | 0x0200);
908                 tg3_writephy(tp, 0x16, 0x0002);
909                 for (i = 0; i < 6; i++)
910                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
911                 tg3_writephy(tp, 0x16, 0x0202);
912                 if (tg3_wait_macro_done(tp))
913                         return -EBUSY;
914         }
915
916         return 0;
917 }
918
/* PHY reset workaround for 5703/5704/5705: BMCR-reset the PHY, force
 * 1000/full master mode, and verify the DSP with a test pattern,
 * retrying up to 10 times.  Afterwards the pattern is cleared and the
 * saved MII_TG3_CTRL value is restored.
 *
 * Returns 0 on success or a negative error code.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if every attempt bails out on this read
		 * via `continue`, phy9_orig is used uninitialized after
		 * the loop — confirm whether that path can happen.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Zero the test pattern in all four DSP channels. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the MII_TG3_CTRL value saved before forcing master mode. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Undo the 0x3000 transmitter/interrupt disable set above; if
	 * even this readback fails, report -EBUSY (unless an earlier
	 * error is already pending in err).
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
994
995 static void tg3_link_report(struct tg3 *);
996
/* Reset the tigon3 PHY and apply chip-specific DSP/AUX_CTRL fixups
 * afterwards.  If the interface was up with carrier, carrier is
 * dropped and the link change reported first.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice: the link-status bit is latched, so the
	 * first read can return stale state (standard MII behavior).
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavyweight reset-and-verify
	 * workaround; everything else gets a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset DSP fixups keyed off per-PHY erratum flags. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* The same value is written twice on purpose — TODO
		 * confirm against the 5704 A0 erratum.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1085
/* Configure the GRC GPIOs that control the auxiliary power source.
 * On 5704/5714 (dual-port) devices the peer port's WOL/ASF state is
 * consulted as well, since both ports influence the same decision.
 * No-op on EEPROM-write-protected (LOM) boards.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Aux power is needed when either port has WOL or ASF active. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			/* 5700/5701: a single write sets all GPIOs. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Peer has completed init; leave GPIO state alone. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			/* Staged sequence: enable outputs, then raise
			 * GPIO0, then drop GPIO2 — each step with its
			 * own delayed write.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* Peer has completed init; leave GPIO state alone. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1: output high, release, high again. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1181
1182 static int tg3_setup_phy(struct tg3 *, int);
1183
1184 #define RESET_KIND_SHUTDOWN     0
1185 #define RESET_KIND_INIT         1
1186 #define RESET_KIND_SUSPEND      2
1187
1188 static void tg3_write_sig_post_reset(struct tg3 *, int);
1189 static int tg3_halt_cpu(struct tg3 *, u32);
1190 static int tg3_nvram_lock(struct tg3 *);
1191 static void tg3_nvram_unlock(struct tg3 *);
1192
1193 static void tg3_power_down_phy(struct tg3 *tp)
1194 {
1195         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1196                 return;
1197
1198         tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1199         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1200
1201         /* The PHY should not be powered down on some chips because
1202          * of bugs.
1203          */
1204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1205             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1206             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1207              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1208                 return;
1209         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1210 }
1211
/* Move the device to PCI power state `state`.
 *
 * For PCI_D0 the PM control register is written, the chip is switched
 * out of Vaux (non-LOM boards), and the function returns early.  For
 * D1/D2/D3hot the full shutdown sequence runs: link renegotiated down
 * to 10/half (copper), WOL mailbox and MAC modes programmed, chip
 * clocks slowed, the PHY optionally powered off, aux power GPIOs set,
 * and finally the PCI PM control register written.
 *
 * Returns 0 on success, -EINVAL for an unrecognized state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear PME status (write-one-to-clear) and the state field. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts for the remainder of the shutdown. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings once so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs: renegotiate down to 10/half for low power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, poll (up to ~200ms) for the firmware status
	 * mailbox to report the expected magic before proceeding.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
#if (LINUX_VERSION_CODE < 0x20607)
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ / 1000);
#else
			msleep(1);
#endif
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC to receive (magic) packets while asleep. */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Slow down the chip clocks; the exact recipe depends on
	 * chip generation and whether 100Mb WOL must keep running.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write: newbits1 first, then newbits2. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		/* Turn off the PHY */
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU with the NVRAM lock held; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1424
1425 static void tg3_link_report(struct tg3 *tp)
1426 {
1427         if (!netif_carrier_ok(tp->dev)) {
1428                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1429         } else {
1430                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1431                        tp->dev->name,
1432                        (tp->link_config.active_speed == SPEED_1000 ?
1433                         1000 :
1434                         (tp->link_config.active_speed == SPEED_100 ?
1435                          100 : 10)),
1436                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1437                         "full" : "half"));
1438
1439                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1440                        "%s for RX.\n",
1441                        tp->dev->name,
1442                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1443                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1444         }
1445 }
1446
/* Resolve pause (flow control) settings from the local and remote
 * autoneg advertisement words and program MAC_RX_MODE/MAC_TX_MODE.
 * When TG3_FLAG_PAUSE_AUTONEG is off, the existing
 * TG3_FLAG_{RX,TX}_PAUSE bits are applied unchanged.  The MAC
 * registers are only rewritten when a mode actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution: symmetric capability on both sides
		 * enables both directions; asymmetric combinations
		 * enable only RX or only TX.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1518
1519 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1520 {
1521         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1522         case MII_TG3_AUX_STAT_10HALF:
1523                 *speed = SPEED_10;
1524                 *duplex = DUPLEX_HALF;
1525                 break;
1526
1527         case MII_TG3_AUX_STAT_10FULL:
1528                 *speed = SPEED_10;
1529                 *duplex = DUPLEX_FULL;
1530                 break;
1531
1532         case MII_TG3_AUX_STAT_100HALF:
1533                 *speed = SPEED_100;
1534                 *duplex = DUPLEX_HALF;
1535                 break;
1536
1537         case MII_TG3_AUX_STAT_100FULL:
1538                 *speed = SPEED_100;
1539                 *duplex = DUPLEX_FULL;
1540                 break;
1541
1542         case MII_TG3_AUX_STAT_1000HALF:
1543                 *speed = SPEED_1000;
1544                 *duplex = DUPLEX_HALF;
1545                 break;
1546
1547         case MII_TG3_AUX_STAT_1000FULL:
1548                 *speed = SPEED_1000;
1549                 *duplex = DUPLEX_FULL;
1550                 break;
1551
1552         default:
1553                 *speed = SPEED_INVALID;
1554                 *duplex = DUPLEX_INVALID;
1555                 break;
1556         };
1557 }
1558
/* Program the copper PHY's advertisement and control registers to begin
 * link negotiation.  Three configuration paths exist:
 *   1. low-power mode: advertise 10baseT only (plus 100baseT if WOL at
 *      100Mb is enabled);
 *   2. no specific speed requested: advertise everything the hardware
 *      supports and let autoneg pick;
 *   3. a forced speed/duplex: advertise only the requested mode and, if
 *      autoneg is disabled, write BMCR directly.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100baseT available if Wake-on-LAN needs 100Mb. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific link mode requested: advertise everything,
		 * masking off gigabit modes on 10/100-only hardware.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate the ethtool ADVERTISED_* mask into the MII
		 * ADVERTISE register bits.
		 */
		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		/* Gigabit modes are advertised via the separate 1000BASE-T
		 * control register.
		 */
		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 silicon wants to act as master when
			 * running gigabit (chip-specific workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* Forced 10/100: clear gigabit advertisement and
			 * advertise only the requested speed/duplex.
			 */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Autoneg off with a valid forced speed: program BMCR
		 * directly instead of restarting negotiation.
		 */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback before changing
			 * BMCR, then wait (up to ~15ms) for link-down to
			 * latch before writing the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice: the link bit is
				 * latched, the second read is current.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg enabled: kick off (or restart) negotiation with
		 * the advertisements programmed above.
		 */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1697
1698 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1699 {
1700         int err;
1701
1702         /* Turn off tap power management. */
1703         /* Set Extended packet length bit */
1704         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1705
1706         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1707         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1708
1709         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1710         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1711
1712         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1713         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1714
1715         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1716         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1717
1718         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1719         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1720
1721         udelay(40);
1722
1723         return err;
1724 }
1725
1726 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1727 {
1728         u32 adv_reg, all_mask;
1729
1730         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1731                 return 0;
1732
1733         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1734                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1735         if ((adv_reg & all_mask) != all_mask)
1736                 return 0;
1737         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1738                 u32 tg3_ctrl;
1739
1740                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1741                         return 0;
1742
1743                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1744                             MII_TG3_CTRL_ADV_1000_FULL);
1745                 if ((tg3_ctrl & all_mask) != all_mask)
1746                         return 0;
1747         }
1748         return 1;
1749 }
1750
/* Bring up (or re-evaluate) the link on a copper PHY.  Clears pending
 * MAC events, optionally resets the PHY, applies chip/PHY-specific
 * workarounds, polls link status, derives speed/duplex and flow
 * control, programs MAC_MODE accordingly, and updates the carrier
 * state.  Returns 0 on success or a tg3_init_5401phy_dsp() error.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any stale MAC status change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* Double read: the BMSR link bit is latched, so the
		 * second read reflects current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* BCM5401 needs its DSP workaround reloaded when
			 * the link is down.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms for the link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit with no link: reset and
			 * reload the DSP sequence once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt when using MI
	 * interrupts, otherwise mask everything.
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Configure the PHY LED mode on 5700/5701. */
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL is set; if it was not, set
		 * it and skip straight to the relink path.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 iterations) for link-up; BMSR is read twice
	 * per iteration because the link bit is latched.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to report a nonzero speed/duplex
		 * encoding (up to ~20ms).
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a stable, plausible value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY is in
			 * the exact requested speed/duplex with autoneg
			 * off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements / restart negotiation, then
		 * recheck link (double BMSR read, as above).
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* Link polarity handling differs on 5700 vs later chips. */
	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X or high-speed PCI: notify firmware
	 * via the NIC SRAM mailbox after the link settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate link state changes to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2029
/* Software state for the fiber (1000BASE-X) auto-negotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	/* Current state of the negotiation state machine. */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* MR_* status/control flags: local enable/restart requests plus
	 * link-partner abilities decoded from rxconfig.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per state-machine call,
	 * link_time records when the current phase started.
	 */
	unsigned long link_time, cur_time;

	/* Last config word received, and how many consecutive times the
	 * same word has been seen (for the ability-match condition).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match predicates derived from received config words. */
	char ability_match, idle_match, ack_match;

	/* Config words transmitted to / received from the link partner;
	 * bit meanings given by the ANEG_CFG_* defines below.
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2087 #define ANEG_OK         0
2088 #define ANEG_DONE       1
2089 #define ANEG_TIMER_ENAB 2
2090 #define ANEG_FAILED     -1
2091
2092 #define ANEG_STATE_SETTLE_TIME  10000
2093
/* Advance the fiber auto-negotiation software state machine by one
 * tick.  Samples received config words from the MAC, updates the match
 * predicates in *ap, then executes one transition of the state machine.
 * Returns ANEG_OK (keep stepping), ANEG_TIMER_ENAB (keep stepping, a
 * settle timer is active), ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First call: zero all bookkeeping. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word (if any) and derive the
	 * ability/ack/idle match predicates.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Config word changed: restart the match count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word seen again: ability matches after two
			 * consecutive identical words.
			 */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config received: link partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		/* Entry point: reset bookkeeping and either start
		 * negotiating or report link OK with autoneg disabled.
		 */
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Begin transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time elapses. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable nonzero ability word from the peer. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the peer's ability word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		/* Proceed when the peer also acknowledges; restart if
		 * the word changed or dropped to zero.
		 */
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Decode the link partner's abilities into MR_LP_* flags;
		 * reserved bits set means the word is invalid.
		 */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		/* Restart if the peer drops its word; otherwise wait for
		 * settle, then move on unless next-page exchange is
		 * pending (which this driver does not implement).
		 */
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2341
2342 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2343 {
2344         int res = 0;
2345         struct tg3_fiber_aneginfo aninfo;
2346         int status = ANEG_FAILED;
2347         unsigned int tick;
2348         u32 tmp;
2349
2350         tw32_f(MAC_TX_AUTO_NEG, 0);
2351
2352         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2353         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2354         udelay(40);
2355
2356         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2357         udelay(40);
2358
2359         memset(&aninfo, 0, sizeof(aninfo));
2360         aninfo.flags |= MR_AN_ENABLE;
2361         aninfo.state = ANEG_STATE_UNKNOWN;
2362         aninfo.cur_time = 0;
2363         tick = 0;
2364         while (++tick < 195000) {
2365                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2366                 if (status == ANEG_DONE || status == ANEG_FAILED)
2367                         break;
2368
2369                 udelay(1);
2370         }
2371
2372         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2373         tw32_f(MAC_MODE, tp->mac_mode);
2374         udelay(40);
2375
2376         *flags = aninfo.flags;
2377
2378         if (status == ANEG_DONE &&
2379             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2380                              MR_LP_ADV_FULL_DUPLEX)))
2381                 res = 1;
2382
2383         return res;
2384 }
2385
/* Hardware init sequence for the BCM8002 SERDES PHY.
 *
 * Resets the PHY and programs vendor-specific analog/PLL registers
 * via tg3_writephy(), then deselects the channel register so the PHY
 * ID can be read afterwards.  The register numbers and values are
 * undocumented vendor magic; the write ORDER and the delays between
 * them matter — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	/* NOTE(review): the 0x18/0x16 values below are vendor magic
	 * with no public documentation.
	 */
	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2435
/* Link bring-up for fiber boards whose autoneg is performed by the
 * SG_DIG hardware block.  Handles both forced-mode and autoneg link
 * configuration, falling back to parallel detection (and restarting
 * autoneg via the restart_autoneg label) when negotiation stalls.
 *
 * Returns 1 if the link came up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes performed below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced link: if hardware autoneg (bit 31) is still
		 * enabled, disable it and restore the serdes config.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			/* Forced mode: no pause negotiation possible. */
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the restart bit (30), then write the steady
		 * configuration.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Give time to negotiate (~200ms): 40000 polls, 5us each. */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		/* Bit 1 of SG_DIG_STATUS appears to signal autoneg
		 * completion (undocumented; inferred from usage).
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			/* Bits 19/20: partner pause / asym-pause ability. */
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Skip one cycle right
			 * after a (re)start; otherwise try parallel
			 * detection, restarting autoneg on failure.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				} else
					goto restart_autoneg;
			}
		}
	}

out:
	return current_link_up;
}
2562
/* Link bring-up for fiber boards without the SG_DIG hardware autoneg
 * block: run the software state machine via fiber_autoneg(), or force
 * 1000FD when autoneg is disabled.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal; clear the flow-control
	 * flag and report link down.
	 */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate state-machine flags into MII pause
			 * advertisement bits for flow-control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change bits until they stay clear
		 * (bounded to 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		/* Autoneg failed but we have sync and are not receiving
		 * config words: accept the link (parallel detection).
		 */
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2620
/* Top-level link setup for TBI/SERDES fiber boards.  Dispatches to
 * hardware (SG_DIG) or by-hand autoneg, then updates carrier state,
 * LEDs, and the recorded speed/duplex, reporting any link change.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can tell at the end
	 * whether anything changed and needs reporting.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg boards with a healthy, already-up
	 * link only need their SYNC/CFG change bits acknowledged.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the link-change bit in the status block while keeping
	 * the other status bits intact.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG changed until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Lost sync: briefly pulse SEND_CONFIGS, presumably to
		 * nudge the link partner — TODO confirm intent.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* A fiber link, when up, is always recorded as 1000/full. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier transitions, or parameter changes on a link
	 * whose up/down state did not change.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2734
2735 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2736 {
2737         int current_link_up, err = 0;
2738         u32 bmsr, bmcr;
2739         u16 current_speed;
2740         u8 current_duplex;
2741
2742         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2743         tw32_f(MAC_MODE, tp->mac_mode);
2744         udelay(40);
2745
2746         tw32(MAC_EVENT, 0);
2747
2748         tw32_f(MAC_STATUS,
2749              (MAC_STATUS_SYNC_CHANGED |
2750               MAC_STATUS_CFG_CHANGED |
2751               MAC_STATUS_MI_COMPLETION |
2752               MAC_STATUS_LNKSTATE_CHANGED));
2753         udelay(40);
2754
2755         if (force_reset)
2756                 tg3_phy_reset(tp);
2757
2758         current_link_up = 0;
2759         current_speed = SPEED_INVALID;
2760         current_duplex = DUPLEX_INVALID;
2761
2762         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2763         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2765                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2766                         bmsr |= BMSR_LSTATUS;
2767                 else
2768                         bmsr &= ~BMSR_LSTATUS;
2769         }
2770
2771         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2772
2773         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2774             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2775                 /* do nothing, just check for link up at the end */
2776         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2777                 u32 adv, new_adv;
2778
2779                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2780                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2781                                   ADVERTISE_1000XPAUSE |
2782                                   ADVERTISE_1000XPSE_ASYM |
2783                                   ADVERTISE_SLCT);
2784
2785                 /* Always advertise symmetric PAUSE just like copper */
2786                 new_adv |= ADVERTISE_1000XPAUSE;
2787
2788                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2789                         new_adv |= ADVERTISE_1000XHALF;
2790                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2791                         new_adv |= ADVERTISE_1000XFULL;
2792
2793                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2794                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2795                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2796                         tg3_writephy(tp, MII_BMCR, bmcr);
2797
2798                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2799                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2800                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2801
2802                         return err;
2803                 }
2804         } else {
2805                 u32 new_bmcr;
2806
2807                 bmcr &= ~BMCR_SPEED1000;
2808                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2809
2810                 if (tp->link_config.duplex == DUPLEX_FULL)
2811                         new_bmcr |= BMCR_FULLDPLX;
2812
2813                 if (new_bmcr != bmcr) {
2814                         /* BMCR_SPEED1000 is a reserved bit that needs
2815                          * to be set on write.
2816                          */
2817                         new_bmcr |= BMCR_SPEED1000;
2818
2819                         /* Force a linkdown */
2820                         if (netif_carrier_ok(tp->dev)) {
2821                                 u32 adv;
2822
2823                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2824                                 adv &= ~(ADVERTISE_1000XFULL |
2825                                          ADVERTISE_1000XHALF |
2826                                          ADVERTISE_SLCT);
2827                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2828                                 tg3_writephy(tp, MII_BMCR, bmcr |
2829                                                            BMCR_ANRESTART |
2830                                                            BMCR_ANENABLE);
2831                                 udelay(10);
2832                                 netif_carrier_off(tp->dev);
2833                         }
2834                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2835                         bmcr = new_bmcr;
2836                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2837                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2838                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2839                             ASIC_REV_5714) {
2840                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2841                                         bmsr |= BMSR_LSTATUS;
2842                                 else
2843                                         bmsr &= ~BMSR_LSTATUS;
2844                         }
2845                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2846                 }
2847         }
2848
2849         if (bmsr & BMSR_LSTATUS) {
2850                 current_speed = SPEED_1000;
2851                 current_link_up = 1;
2852                 if (bmcr & BMCR_FULLDPLX)
2853                         current_duplex = DUPLEX_FULL;
2854                 else
2855                         current_duplex = DUPLEX_HALF;
2856
2857                 if (bmcr & BMCR_ANENABLE) {
2858                         u32 local_adv, remote_adv, common;
2859
2860                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2861                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2862                         common = local_adv & remote_adv;
2863                         if (common & (ADVERTISE_1000XHALF |
2864                                       ADVERTISE_1000XFULL)) {
2865                                 if (common & ADVERTISE_1000XFULL)
2866                                         current_duplex = DUPLEX_FULL;
2867                                 else
2868                                         current_duplex = DUPLEX_HALF;
2869
2870                                 tg3_setup_flow_control(tp, local_adv,
2871                                                        remote_adv);
2872                         }
2873                         else
2874                                 current_link_up = 0;
2875                 }
2876         }
2877
2878         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2879         if (tp->link_config.active_duplex == DUPLEX_HALF)
2880                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2881
2882         tw32_f(MAC_MODE, tp->mac_mode);
2883         udelay(40);
2884
2885         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2886
2887         tp->link_config.active_speed = current_speed;
2888         tp->link_config.active_duplex = current_duplex;
2889
2890         if (current_link_up != netif_carrier_ok(tp->dev)) {
2891                 if (current_link_up)
2892                         netif_carrier_on(tp->dev);
2893                 else {
2894                         netif_carrier_off(tp->dev);
2895                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2896                 }
2897                 tg3_link_report(tp);
2898         }
2899         return err;
2900 }
2901
/* Reconcile parallel-detected links with autonegotiation on
 * MII-SERDES boards (called periodically — presumably from the driver
 * timer; confirm against the caller):
 *
 *  - link down, autoneg enabled: if the PHY reports signal detect but
 *    no incoming config code words, force the link up via parallel
 *    detection (1000FD, autoneg off);
 *  - link up by parallel detection: if config code words start
 *    arriving, turn autoneg back on.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read — presumably flushes a latched
			 * value; confirm against PHY documentation.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2959
2960 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2961 {
2962         int err;
2963
2964         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2965                 err = tg3_setup_fiber_phy(tp, force_reset);
2966         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2967                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2968         } else {
2969                 err = tg3_setup_copper_phy(tp, force_reset);
2970         }
2971
2972         if (tp->link_config.active_speed == SPEED_1000 &&
2973             tp->link_config.active_duplex == DUPLEX_HALF)
2974                 tw32(MAC_TX_LENGTHS,
2975                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2976                       (6 << TX_LENGTHS_IPG_SHIFT) |
2977                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2978         else
2979                 tw32(MAC_TX_LENGTHS,
2980                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2981                       (6 << TX_LENGTHS_IPG_SHIFT) |
2982                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2983
2984         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2985                 if (netif_carrier_ok(tp->dev)) {
2986                         tw32(HOSTCC_STAT_COAL_TICKS,
2987                              tp->coal.stats_block_coalesce_usecs);
2988                 } else {
2989                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2990                 }
2991         }
2992
2993         return err;
2994 }
2995
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If we are already using the reorder-safe indirect mailbox
	 * path, reaching this point means something else is broken.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Mark recovery pending under tp->lock; the actual chip reset
	 * happens later (see the comment above).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3016
3017 static inline u32 tg3_tx_avail(struct tg3 *tp)
3018 {
3019         smp_mb();
3020         return (tp->tx_pending -
3021                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3022 }
3023
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Reclaim completed TX descriptors between our consumer index
	 * and the consumer index reported by the hardware status block.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware and
		 * driver disagree about the ring — trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear skb data... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* ...followed by one descriptor per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must be skb-less and must not
			 * run past the hardware index; otherwise the
			 * completion is bogus (see tg3_tx_recover()).
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue only if it is stopped and enough descriptors
	 * freed up; re-check under the tx lock to avoid racing with
	 * tg3_start_xmit() stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3091
3092 /* Returns size of skb allocated or < 0 on error.
3093  *
3094  * We only need to fill in the address because the other members
3095  * of the RX descriptor are invariant, see tg3_init_rings.
3096  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3098  * posting buffers we only dirty the first cache line of the RX
3099  * descriptor (containing the address).  Whereas for the RX status
3100  * buffers the cpu only reads the last cacheline of the RX descriptor
3101  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3102  */
3103 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3104                             int src_idx, u32 dest_idx_unmasked)
3105 {
3106         struct tg3_rx_buffer_desc *desc;
3107         struct ring_info *map, *src_map;
3108         struct sk_buff *skb;
3109         dma_addr_t mapping;
3110         int skb_size, dest_idx;
3111
3112         src_map = NULL;
3113         switch (opaque_key) {
3114         case RXD_OPAQUE_RING_STD:
3115                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3116                 desc = &tp->rx_std[dest_idx];
3117                 map = &tp->rx_std_buffers[dest_idx];
3118                 if (src_idx >= 0)
3119                         src_map = &tp->rx_std_buffers[src_idx];
3120                 skb_size = tp->rx_pkt_buf_sz;
3121                 break;
3122
3123         case RXD_OPAQUE_RING_JUMBO:
3124                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3125                 desc = &tp->rx_jumbo[dest_idx];
3126                 map = &tp->rx_jumbo_buffers[dest_idx];
3127                 if (src_idx >= 0)
3128                         src_map = &tp->rx_jumbo_buffers[src_idx];
3129                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3130                 break;
3131
3132         default:
3133                 return -EINVAL;
3134         };
3135
3136         /* Do not overwrite any of the map or rp information
3137          * until we are sure we can commit to a new buffer.
3138          *
3139          * Callers depend upon this behavior and assume that
3140          * we leave everything unchanged if we fail.
3141          */
3142         skb = netdev_alloc_skb(tp->dev, skb_size);
3143         if (skb == NULL)
3144                 return -ENOMEM;
3145
3146         skb_reserve(skb, tp->rx_offset);
3147
3148         mapping = pci_map_single(tp->pdev, skb->data,
3149                                  skb_size - tp->rx_offset,
3150                                  PCI_DMA_FROMDEVICE);
3151
3152         map->skb = skb;
3153         pci_unmap_addr_set(map, mapping, mapping);
3154
3155         if (src_map != NULL)
3156                 src_map->skb = NULL;
3157
3158         desc->addr_hi = ((u64)mapping >> 32);
3159         desc->addr_lo = ((u64)mapping & 0xffffffff);
3160
3161         return skb_size;
3162 }
3163
3164 /* We only need to move over in the address because the other
3165  * members of the RX descriptor are invariant.  See notes above
3166  * tg3_alloc_rx_skb for full details.
3167  */
3168 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3169                            int src_idx, u32 dest_idx_unmasked)
3170 {
3171         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3172         struct ring_info *src_map, *dest_map;
3173         int dest_idx;
3174
3175         switch (opaque_key) {
3176         case RXD_OPAQUE_RING_STD:
3177                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3178                 dest_desc = &tp->rx_std[dest_idx];
3179                 dest_map = &tp->rx_std_buffers[dest_idx];
3180                 src_desc = &tp->rx_std[src_idx];
3181                 src_map = &tp->rx_std_buffers[src_idx];
3182                 break;
3183
3184         case RXD_OPAQUE_RING_JUMBO:
3185                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3186                 dest_desc = &tp->rx_jumbo[dest_idx];
3187                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3188                 src_desc = &tp->rx_jumbo[src_idx];
3189                 src_map = &tp->rx_jumbo_buffers[src_idx];
3190                 break;
3191
3192         default:
3193                 return;
3194         };
3195
3196         dest_map->skb = src_map->skb;
3197         pci_unmap_addr_set(dest_map, mapping,
3198                            pci_unmap_addr(src_map, mapping));
3199         dest_desc->addr_hi = src_desc->addr_hi;
3200         dest_desc->addr_lo = src_desc->addr_lo;
3201
3202         src_map->skb = NULL;
3203 }
3204
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged RX packet to the stack via the hardware-accel
 * VLAN receive path, using the group registered in tp->vlgrp.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3211
3212 /* The RX ring scheme is composed of multiple rings which post fresh
3213  * buffers to the chip, and one special ring the chip uses to report
3214  * status back to the host.
3215  *
3216  * The special ring reports the status of received packets to the
3217  * host.  The chip does not write into the original descriptor the
3218  * RX buffer was obtained from.  The chip simply takes the original
3219  * descriptor as provided by the host, updates the status and length
3220  * field, then writes this into the next status ring entry.
3221  *
3222  * Each ring the host uses to post buffers to the chip is described
3223  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3224  * it is first placed into the on-chip ram.  When the packet's length
3225  * is known, it walks down the TG3_BDINFO entries to select the ring.
3226  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3227  * which is within the range of the new packet's length is chosen.
3228  *
3229  * The "separate ring for rx status" scheme may sound queer, but it makes
3230  * sense from a cache coherency perspective.  If only the host writes
3231  * to the buffer post rings, and only the chip writes to the rx status
3232  * rings, then cache lines never move beyond shared-modified state.
3233  * If both the host and chip were to write into the same ring, cache line
3234  * eviction could occur since both entities want it in an exclusive state.
3235  */
/* Service the RX return ring: process up to `budget` completed packets,
 * repost or replace their buffers, and kick the producer mailboxes.
 * Returns the number of packets passed up the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring (std
		 * or jumbo) the buffer came from, and its index there.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;	/* unknown ring: skip entry */
		}

		work_mask |= opaque_key;

		/* Hardware-flagged receive error: recycle the buffer. */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD 
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a new skb and return
			 * the original buffer straight to the chip.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* A hardware TCP/UDP checksum of 0xffff means verified. */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let too many std-ring buffers accumulate
		 * unposted; kick the producer mailbox mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Some platforms need to sync memory here */
	wmb();

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3394
/* NAPI poll callback: handles link-change events, TX reclaim, and
 * budget-bounded RX processing.  Returns 0 (and completes NAPI) when
 * no work remains, 1 to stay on the poll list.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() detected ring corruption: stop polling and
		 * let the reset task rebuild the device.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();	/* order the tag read before re-checking for work */
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3456
/* Make the IRQ handlers ignore further interrupts (via irq_sync) and
 * wait for any handler already running on another CPU to complete.
 * Must not be called while already quiesced (BUG_ON).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* publish irq_sync before waiting on handlers */

#if (LINUX_VERSION_CODE >= 0x2051c)
	synchronize_irq(tp->pdev->irq);
#else
	/* older kernels only offer the global variant */
	synchronize_irq();
#endif
}
3470
/* Nonzero while interrupts are quiesced by tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3475
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	/* BH-disabled lock: serializes against timers and NAPI. */
	spin_lock_bh(&tp->lock);
}
3487
/* Drop the lock taken by tg3_full_lock().  Note this does not clear
 * tp->irq_sync; paths that quiesced IRQs reset it separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3492
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Drop the event while quiesced; otherwise defer all work to
	 * the NAPI poll routine.
	 */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3509
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3534
/* Legacy INTx ISR (untagged status).  Returns IRQ_HANDLED only when
 * the interrupt was actually ours, so shared-IRQ accounting works.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Device is quiesced; leave chip interrupts masked. */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3577
/* INTx ISR for chips using tagged status blocks: new work is detected
 * by comparing the status tag against the last one acknowledged.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Device is quiesced; leave chip interrupts masked. */
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3619
/* ISR for interrupt test: only claims the interrupt (confirming it is
 * ours via the status block or PCI state register) and ACKs it through
 * the interrupt mailbox -- it schedules no actual work.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3636
3637 static int tg3_init_hw(struct tg3 *, int);
3638 static int tg3_halt(struct tg3 *, int, int);
3639
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success or the negative
 * error from tg3_init_hw(), in which case the device is closed.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the device.
		 * dev_close() needs the lock dropped; re-take it before
		 * returning so the caller's locking stays balanced.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3661
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: drive the interrupt handler by hand when normal
 * interrupt delivery is unavailable (e.g. netconsole/netdump).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
	if (netdump_mode) {
		tg3_interrupt(tp->pdev->irq, dev, NULL);
		/* NAPI never runs in netdump mode, so poll directly if
		 * the device is queued on the poll list.
		 */
		if (dev->poll_list.prev) {
			int budget = 64;

			tg3_poll(dev, &budget);
		}
	}
	else
#endif
	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3681
/* Process-context reset handler, scheduled (via tp->reset_task) from
 * the TX timeout and TX-recovery paths: stops the interface, resets
 * and reinitializes the chip, then restarts networking.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Nothing to do if the device was closed before we ran. */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock with IRQs quiesced for the actual reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* TX recovery: switch to flushed/reordered mailbox writes
	 * before the chip is reinitialized.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3726
/* net_device watchdog callback: the stack saw no TX progress for too
 * long.  Defer the actual chip reset to process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3736
3737 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3738 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3739 {
3740         u32 base = (u32) mapping & 0xffffffff;
3741
3742         return ((base > 0xffffdcc0) &&
3743                 (base + len + 8 < base));
3744 }
3745
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* The check only matters for chips with the 40-bit DMA bug,
	 * and only 64-bit highmem configs can produce such addresses.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3758
3759 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3760
3761 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3762 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3763                                        u32 last_plus_one, u32 *start,
3764                                        u32 base_flags, u32 mss)
3765 {
3766         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3767         dma_addr_t new_addr = 0;
3768         u32 entry = *start;
3769         int i, ret = 0;
3770
3771         if (!new_skb) {
3772                 ret = -1;
3773         } else {
3774                 /* New SKB is guaranteed to be linear. */
3775                 entry = *start;
3776                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3777                                           PCI_DMA_TODEVICE);
3778                 /* Make sure new skb does not cross any 4G boundaries.
3779                  * Drop the packet if it does.
3780                  */
3781                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3782                         ret = -1;
3783                         dev_kfree_skb(new_skb);
3784                         new_skb = NULL;
3785                 } else {
3786                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3787                                     base_flags, 1 | (mss << 1));
3788                         *start = NEXT_TX(entry);
3789                 }
3790         }
3791
3792         /* Now clean up the sw ring entries. */
3793         i = 0;
3794         while (entry != last_plus_one) {
3795                 int len;
3796
3797                 if (i == 0)
3798                         len = skb_headlen(skb);
3799                 else
3800                         len = skb_shinfo(skb)->frags[i-1].size;
3801                 pci_unmap_single(tp->pdev,
3802                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3803                                  len, PCI_DMA_TODEVICE);
3804                 if (i == 0) {
3805                         tp->tx_buffers[entry].skb = new_skb;
3806                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3807                 } else {
3808                         tp->tx_buffers[entry].skb = NULL;
3809                 }
3810                 entry = NEXT_TX(entry);
3811                 i++;
3812         }
3813
3814         dev_kfree_skb(skb);
3815
3816         return ret;
3817 }
3818
3819 static void tg3_set_txd(struct tg3 *tp, int entry,
3820                         dma_addr_t mapping, int len, u32 flags,
3821                         u32 mss_and_is_end)
3822 {
3823         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3824         int is_end = (mss_and_is_end & 0x1);
3825         u32 mss = (mss_and_is_end >> 1);
3826         u32 vlan_tag = 0;
3827
3828         if (is_end)
3829                 flags |= TXD_FLAG_END;
3830         if (flags & TXD_FLAG_VLAN) {
3831                 vlan_tag = flags >> 16;
3832                 flags &= 0xffff;
3833         }
3834         vlan_tag |= (mss << TXD_MSS_SHIFT);
3835
3836         txd->addr_hi = ((u64) mapping >> 32);
3837         txd->addr_lo = ((u64) mapping & 0xffffffff);
3838         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3839         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3840 }
3841
3842 /* hard_start_xmit for devices that don't have any bugs and
3843  * support TG3_FLG2_HW_TSO_2 only.
3844  */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        /* Length of the linear (non-paged) portion of the skb. */
        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* TSO: a nonzero gso_size on an oversized frame means the
         * hardware must segment this packet.
         */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* The IP/TCP headers are modified in place below, so a
                 * cloned header block must be un-shared first.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

#ifdef NETIF_F_GSO
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        /* TCPv6: encode the full header length (minus the
                         * Ethernet header) in the upper bits of mss.
                         */
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else
#endif
                {
                        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                        ip_tcp_len = (skb->nh.iph->ihl * 4) +
                                     sizeof(struct tcphdr);

                        /* Seed tot_len with the per-segment length; the
                         * chip recomputes the IP checksum per segment.
                         */
                        skb->nh.iph->check = 0;
                        skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
                                                     tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* HW_TSO_2 parts compute the TCP checksum themselves. */
                skb->h.th->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_HW)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
        mss = 0;
        if (skb->ip_summed == CHECKSUM_HW)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
        /* Carry the VLAN tag to the descriptor in the upper 16 bits. */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* Bit 0 of the last argument marks the final fragment. */
        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the first slot of a packet keeps the skb
                         * pointer; fragment slots stay NULL.
                         */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Some platforms need to sync memory here */
        wmb();

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* No room for another worst-case packet: stop the queue,
                 * then re-check in case reclaim just freed descriptors.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
                        netif_wake_queue(tp->dev);
        }

#if TG3_TSO_SUPPORT != 0
out_unlock:
#endif
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
3975
3976
3977 #if TG3_TSO_SUPPORT != 0
3978 #ifdef NETIF_F_GSO
3979 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3980
3981 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3982  * TSO header is greater than 80 bytes.
3983  */
3984 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3985 {
3986         struct sk_buff *segs, *nskb;
3987
3988         /* Estimate the number of fragments in the worst case */
3989         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3990                 netif_stop_queue(tp->dev);
3991                 return NETDEV_TX_BUSY;
3992         }
3993
3994         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3995         if (unlikely(IS_ERR(segs)))
3996                 goto tg3_tso_bug_end;
3997
3998         do {
3999                 nskb = segs;
4000                 segs = segs->next;
4001                 nskb->next = NULL;
4002                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4003         } while (segs);
4004
4005 tg3_tso_bug_end:
4006         dev_kfree_skb(skb);
4007
4008         return NETDEV_TX_OK;
4009 }
4010 #endif
4011 #endif
4012
4013 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4014  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4015  */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        /* Set when any mapping crosses a 4GB boundary or exceeds the
         * 40-bit limit; triggers the DMA workaround below.
         */
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_HW)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* TSO: a nonzero gso_size on an oversized frame means this
         * packet must be segmented.
         */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are edited in place below; un-share a cloned
                 * header block first.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

                hdr_len = ip_tcp_len + tcp_opt_len;
#ifdef NETIF_F_GSO
                /* Headers over 80 bytes trip a rare TSO hardware bug on
                 * affected chips; fall back to GSO software segmentation.
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
                        return (tg3_tso_bug(tp, skb));
#endif

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Seed tot_len with the per-segment length. */
                skb->nh.iph->check = 0;
                skb->nh.iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the TCP checksum itself. */
                        skb->h.th->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                }
                else {
                        /* Firmware TSO needs a pseudo-header checksum seed. */
                        skb->h.th->check =
                                ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                   skb->nh.iph->daddr,
                                                   0, IPPROTO_TCP, 0);
                }

                /* Encode the IP/TCP option word count where this chip
                 * variant expects it: in the mss field (HW TSO / 5705)
                 * or in base_flags (other firmware-TSO chips).
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                base_flags |= tsflags << 12;
                        }
                }
        }
#else
        mss = 0;
#endif
#if TG3_VLAN_TAG_USED
        /* Carry the VLAN tag to the descriptor in the upper 16 bits. */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the first slot of a packet holds the skb. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to this packet's first descriptor so the
                 * workaround can re-map the whole packet safely.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Some platforms need to sync memory here */
        wmb();

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* No room for another worst-case packet: stop the queue,
                 * then re-check in case reclaim just freed descriptors.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4198
4199 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4200                                int new_mtu)
4201 {
4202         dev->mtu = new_mtu;
4203
4204         if (new_mtu > ETH_DATA_LEN) {
4205                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4206                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4207 #if TG3_TSO_SUPPORT != 0
4208                         ethtool_op_set_tso(dev, 0);
4209 #endif
4210                 }
4211                 else
4212                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4213         } else {
4214                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4215                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4216                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4217         }
4218 }
4219
4220 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4221 {
4222         struct tg3 *tp = netdev_priv(dev);
4223         int err;
4224
4225         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4226                 return -EINVAL;
4227
4228         if (!netif_running(dev)) {
4229                 /* We'll just catch it later when the
4230                  * device is up'd.
4231                  */
4232                 tg3_set_mtu(dev, tp, new_mtu);
4233                 return 0;
4234         }
4235
4236         tg3_netif_stop(tp);
4237
4238         tg3_full_lock(tp, 1);
4239
4240         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4241
4242         tg3_set_mtu(dev, tp, new_mtu);
4243
4244         err = tg3_restart_hw(tp, 0);
4245
4246         if (!err)
4247                 tg3_netif_start(tp);
4248
4249         tg3_full_unlock(tp);
4250
4251         return err;
4252 }
4253
4254 /* Free up pending packets in all rx/tx rings.
4255  *
4256  * The chip has been shut down and the driver detached from
4257  * the networking, so no interrupts or new tx packets will
4258  * end up in the driver.  tp->{tx,}lock is not held and we are not
4259  * in an interrupt context and thus may sleep.
4260  */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted receive SKB. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same treatment with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: a packet occupies one slot for the linear part plus
         * one per page fragment, and only the first slot carries the
         * skb pointer, so walk slot groups rather than single slots.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        /* Fragment slots may wrap past the ring end. */
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4325
4326 /* Initialize tx/rx rings for packet processing.
4327  *
4328  * The chip has been shut down and the driver detached from
4329  * the networking, so no interrupts or new tx packets will
4330  * end up in the driver.  tp->{tx,}lock are held and thus
4331  * we may not sleep.
4332  */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips have no jumbo ring; jumbo MTUs use larger
         * buffers in the standard ring instead.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        /* Partial allocation: shrink the ring rather
                         * than fail, unless nothing was allocated.
                         */
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                /* Jumbo failure at zero must also undo
                                 * the standard-ring SKBs posted above.
                                 */
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4415
4416 /*
4417  * Must not be invoked with interrupt sources disabled and
4418  * the hardware shutdown down.
4419  */
4420 static void tg3_free_consistent(struct tg3 *tp)
4421 {
4422         kfree(tp->rx_std_buffers);
4423         tp->rx_std_buffers = NULL;
4424         if (tp->rx_std) {
4425                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4426                                     tp->rx_std, tp->rx_std_mapping);
4427                 tp->rx_std = NULL;
4428         }
4429         if (tp->rx_jumbo) {
4430                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4431                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4432                 tp->rx_jumbo = NULL;
4433         }
4434         if (tp->rx_rcb) {
4435                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4436                                     tp->rx_rcb, tp->rx_rcb_mapping);
4437                 tp->rx_rcb = NULL;
4438         }
4439         if (tp->tx_ring) {
4440                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4441                         tp->tx_ring, tp->tx_desc_mapping);
4442                 tp->tx_ring = NULL;
4443         }
4444         if (tp->hw_status) {
4445                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4446                                     tp->hw_status, tp->status_mapping);
4447                 tp->hw_status = NULL;
4448         }
4449         if (tp->hw_stats) {
4450                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4451                                     tp->hw_stats, tp->stats_mapping);
4452                 tp->hw_stats = NULL;
4453         }
4454 }
4455
4456 /*
4457  * Must not be invoked with interrupt sources disabled and
4458  * the hardware shutdown down.  Can sleep.
4459  */
4460 static int tg3_alloc_consistent(struct tg3 *tp)
4461 {
4462         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4463                                       (TG3_RX_RING_SIZE +
4464                                        TG3_RX_JUMBO_RING_SIZE)) +
4465                                      (sizeof(struct tx_ring_info) *
4466                                       TG3_TX_RING_SIZE),
4467                                      GFP_KERNEL);
4468         if (!tp->rx_std_buffers)
4469                 return -ENOMEM;
4470
4471         memset(tp->rx_std_buffers, 0,
4472                (sizeof(struct ring_info) *
4473                 (TG3_RX_RING_SIZE +
4474                  TG3_RX_JUMBO_RING_SIZE)) +
4475                (sizeof(struct tx_ring_info) *
4476                 TG3_TX_RING_SIZE));
4477
4478         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4479         tp->tx_buffers = (struct tx_ring_info *)
4480                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4481
4482         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4483                                           &tp->rx_std_mapping);
4484         if (!tp->rx_std)
4485                 goto err_out;
4486
4487         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4488                                             &tp->rx_jumbo_mapping);
4489
4490         if (!tp->rx_jumbo)
4491                 goto err_out;
4492
4493         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4494                                           &tp->rx_rcb_mapping);
4495         if (!tp->rx_rcb)
4496                 goto err_out;
4497
4498         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4499                                            &tp->tx_desc_mapping);
4500         if (!tp->tx_ring)
4501                 goto err_out;
4502
4503         tp->hw_status = pci_alloc_consistent(tp->pdev,
4504                                              TG3_HW_STATUS_SIZE,
4505                                              &tp->status_mapping);
4506         if (!tp->hw_status)
4507                 goto err_out;
4508
4509         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4510                                             sizeof(struct tg3_hw_stats),
4511                                             &tp->stats_mapping);
4512         if (!tp->hw_stats)
4513                 goto err_out;
4514
4515         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4516         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4517
4518         return 0;
4519
4520 err_out:
4521         tg3_free_consistent(tp);
4522         return -ENOMEM;
4523 }
4524
4525 #define MAX_WAIT_CNT 1000
4526
4527 /* To stop a block, clear the enable bit and poll till it
4528  * clears.  tp->lock is held.
4529  */
4530 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4531 {
4532         unsigned int i;
4533         u32 val;
4534
4535         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4536                 switch (ofs) {
4537                 case RCVLSC_MODE:
4538                 case DMAC_MODE:
4539                 case MBFREE_MODE:
4540                 case BUFMGR_MODE:
4541                 case MEMARB_MODE:
4542                         /* We can't enable/disable these bits of the
4543                          * 5705/5750, just say success.
4544                          */
4545                         return 0;
4546
4547                 default:
4548                         break;
4549                 };
4550         }
4551
4552         val = tr32(ofs);
4553         val &= ~enable_bit;
4554         tw32_f(ofs, val);
4555
4556         for (i = 0; i < MAX_WAIT_CNT; i++) {
4557                 udelay(100);
4558                 val = tr32(ofs);
4559                 if ((val & enable_bit) == 0)
4560                         break;
4561         }
4562
4563         if (i == MAX_WAIT_CNT && !silent) {
4564                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4565                        "ofs=%lx enable_bit=%x\n",
4566                        ofs, enable_bit);
4567                 return -ENODEV;
4568         }
4569
4570         return 0;
4571 }
4572
4573 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting new receive traffic first. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Shut down the receive-side blocks.  Errors are OR-ed together
         * so every block is attempted even if an earlier one fails.
         */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Shut down the send-side blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        /* Disable the MAC transmit engine. */
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* Poll for TX_MODE_ENABLE to clear, 100us per iteration. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Stop host coalescing, write DMA and the remaining blocks. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset to flush the internal queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe the shared status and statistics blocks if allocated. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4635
4636 /* tp->lock is held. */
4637 static int tg3_nvram_lock(struct tg3 *tp)
4638 {
4639         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4640                 int i;
4641
4642                 if (tp->nvram_lock_cnt == 0) {
4643                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4644                         for (i = 0; i < 8000; i++) {
4645                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4646                                         break;
4647                                 udelay(20);
4648                         }
4649                         if (i == 8000) {
4650                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4651                                 return -ENODEV;
4652                         }
4653                 }
4654                 tp->nvram_lock_cnt++;
4655         }
4656         return 0;
4657 }
4658
4659 /* tp->lock is held. */
4660 static void tg3_nvram_unlock(struct tg3 *tp)
4661 {
4662         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4663                 if (tp->nvram_lock_cnt > 0)
4664                         tp->nvram_lock_cnt--;
4665                 if (tp->nvram_lock_cnt == 0)
4666                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4667         }
4668 }
4669
4670 /* tp->lock is held. */
4671 static void tg3_enable_nvram_access(struct tg3 *tp)
4672 {
4673         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4674             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4675                 u32 nvaccess = tr32(NVRAM_ACCESS);
4676
4677                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4678         }
4679 }
4680
4681 /* tp->lock is held. */
4682 static void tg3_disable_nvram_access(struct tg3 *tp)
4683 {
4684         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4685             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4686                 u32 nvaccess = tr32(NVRAM_ACCESS);
4687
4688                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4689         }
4690 }
4691
4692 /* tp->lock is held. */
4693 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4694 {
4695         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4696                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4697
4698         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4699                 switch (kind) {
4700                 case RESET_KIND_INIT:
4701                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4702                                       DRV_STATE_START);
4703                         break;
4704
4705                 case RESET_KIND_SHUTDOWN:
4706                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4707                                       DRV_STATE_UNLOAD);
4708                         break;
4709
4710                 case RESET_KIND_SUSPEND:
4711                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4712                                       DRV_STATE_SUSPEND);
4713                         break;
4714
4715                 default:
4716                         break;
4717                 };
4718         }
4719 }
4720
4721 /* tp->lock is held. */
4722 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4723 {
4724         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4725                 switch (kind) {
4726                 case RESET_KIND_INIT:
4727                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4728                                       DRV_STATE_START_DONE);
4729                         break;
4730
4731                 case RESET_KIND_SHUTDOWN:
4732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4733                                       DRV_STATE_UNLOAD_DONE);
4734                         break;
4735
4736                 default:
4737                         break;
4738                 };
4739         }
4740 }
4741
4742 /* tp->lock is held. */
4743 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4744 {
4745         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4746                 switch (kind) {
4747                 case RESET_KIND_INIT:
4748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4749                                       DRV_STATE_START);
4750                         break;
4751
4752                 case RESET_KIND_SHUTDOWN:
4753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4754                                       DRV_STATE_UNLOAD);
4755                         break;
4756
4757                 case RESET_KIND_SUSPEND:
4758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4759                                       DRV_STATE_SUSPEND);
4760                         break;
4761
4762                 default:
4763                         break;
4764                 };
4765         }
4766 }
4767
4768 static int tg3_poll_fw(struct tg3 *tp)
4769 {
4770         int i;
4771         u32 val;
4772
4773         /* Wait for firmware initialization to complete. */
4774         for (i = 0; i < 100000; i++) {
4775                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4776                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4777                         break;
4778                 udelay(10);
4779         }
4780
4781         /* Chip might not be fitted with firmare.  Some Sun onboard
4782          * parts are configured like that.  So don't signal the timeout
4783          * of the above loop as an error, but do report the lack of
4784          * running firmware once.
4785          */
4786         if (i >= 100000 &&
4787             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4788                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4789
4790                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4791                        tp->dev->name);
4792         }
4793
4794         return 0;
4795 }
4796
4797 static void tg3_stop_fw(struct tg3 *);
4798
/* tp->lock is held.
 *
 * Issue a GRC core-clock reset and bring the chip back to a usable
 * post-reset state: restore PCI config space, re-enable indirect
 * register access, restore MSI enable (5780 class), restore MAC
 * mode, wait for the bootcode to finish, and re-probe the ASF
 * configuration from NIC SRAM.  The exact ordering and delays below
 * are hardware requirements; do not reorder.
 * Returns 0 on success or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

#if (LINUX_VERSION_CODE < 0x2060a)
	pci_restore_state(tp->pdev, tp->pci_cfg_state);
#else
	pci_restore_state(tp->pdev);
#endif

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): 0xc4 / bit 15 is an undocumented
		 * chip-specific workaround register — confirm against
		 * Broadcom errata before changing.
		 */
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode matching the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for bootcode; absence of firmware is tolerated there. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4996
4997 /* tp->lock is held. */
4998 static void tg3_stop_fw(struct tg3 *tp)
4999 {
5000         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5001                 u32 val;
5002                 int i;
5003
5004                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5005                 val = tr32(GRC_RX_CPU_EVENT);
5006                 val |= (1 << 14);
5007                 tw32(GRC_RX_CPU_EVENT, val);
5008
5009                 /* Wait for RX cpu to ACK the event.  */
5010                 for (i = 0; i < 100; i++) {
5011                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5012                                 break;
5013                         udelay(1);
5014                 }
5015         }
5016 }
5017
/* Fully stop the chip: pause the ASF firmware, signal the upcoming
 * reset, quiesce the hardware blocks and perform the chip reset,
 * then signal completion.  Caller must hold tp->lock.
 * Returns the result of tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5038
/* Link-map constants for the bundled 5701 A0 workaround firmware
 * below.  Addresses are in the on-chip CPU's address space; only the
 * low 16 bits are used when sections are copied into scratch memory
 * (tg3_load_firmware_cpu() masks with 0xffff).
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a historical typo;
 * the macro name is kept in case other code references it.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5053
/* Raw .text section of the 5701 A0 workaround firmware, loaded into
 * the RX CPU scratch area by tg3_load_5701_a0_firmware_fix().
 * Opaque firmware machine code dumped as 32-bit words — do not edit
 * by hand (see the copyright/permission notice at the top of this
 * file).
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5147
/* Raw .rodata section of the 5701 A0 workaround firmware.  The words
 * encode the firmware's embedded ASCII message strings; treat as an
 * opaque blob and do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5155
/* .data section of the 5701 A0 firmware.  It is entirely zero, so it
 * is compiled out; the loader writes zeros for a section whose data
 * pointer is NULL (see tg3_load_firmware_cpu()).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5162
/* On-chip scratch memory windows (16 KB each) that hold the RX and
 * TX CPU firmware images.
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5167
5168 /* tp->lock is held. */
5169 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5170 {
5171         int i;
5172
5173         BUG_ON(offset == TX_CPU_BASE &&
5174             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5175
5176         if (offset == RX_CPU_BASE) {
5177                 for (i = 0; i < 10000; i++) {
5178                         tw32(offset + CPU_STATE, 0xffffffff);
5179                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5180                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5181                                 break;
5182                 }
5183
5184                 tw32(offset + CPU_STATE, 0xffffffff);
5185                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5186                 udelay(10);
5187         } else {
5188                 for (i = 0; i < 10000; i++) {
5189                         tw32(offset + CPU_STATE, 0xffffffff);
5190                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5191                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5192                                 break;
5193                 }
5194         }
5195
5196         if (i >= 10000) {
5197                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5198                        "and %s CPU\n",
5199                        tp->dev->name,
5200                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5201                 return -ENODEV;
5202         }
5203
5204         /* Clear firmware's nvram arbitration. */
5205         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5206                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5207         return 0;
5208 }
5209
/* Describes one firmware image as three sections to be copied into a
 * CPU's scratch memory.  A NULL *_data pointer means the section is
 * written as zeros (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* CPU-space address of .text */
	unsigned int text_len;		/* section length in bytes */
	u32 *text_data;			/* words to write, or NULL */
	unsigned int rodata_base;	/* CPU-space address of .rodata */
	unsigned int rodata_len;
	u32 *rodata_data;
	unsigned int data_base;		/* CPU-space address of .data */
	unsigned int data_len;
	u32 *data_data;
};
5221
5222 /* tp->lock is held. */
5223 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5224                                  int cpu_scratch_size, struct fw_info *info)
5225 {
5226         int err, lock_err, i;
5227         void (*write_op)(struct tg3 *, u32, u32);
5228
5229         if (cpu_base == TX_CPU_BASE &&
5230             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5231                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5232                        "TX cpu firmware on %s which is 5705.\n",
5233                        tp->dev->name);
5234                 return -EINVAL;
5235         }
5236
5237         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5238                 write_op = tg3_write_mem;
5239         else
5240                 write_op = tg3_write_indirect_reg32;
5241
5242         /* It is possible that bootcode is still loading at this point.
5243          * Get the nvram lock first before halting the cpu.
5244          */
5245         lock_err = tg3_nvram_lock(tp);
5246         err = tg3_halt_cpu(tp, cpu_base);
5247         if (!lock_err)
5248                 tg3_nvram_unlock(tp);
5249         if (err)
5250                 goto out;
5251
5252         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5253                 write_op(tp, cpu_scratch_base + i, 0);
5254         tw32(cpu_base + CPU_STATE, 0xffffffff);
5255         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5256         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5257                 write_op(tp, (cpu_scratch_base +
5258                               (info->text_base & 0xffff) +
5259                               (i * sizeof(u32))),
5260                          (info->text_data ?
5261                           info->text_data[i] : 0));
5262         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5263                 write_op(tp, (cpu_scratch_base +
5264                               (info->rodata_base & 0xffff) +
5265                               (i * sizeof(u32))),
5266                          (info->rodata_data ?
5267                           info->rodata_data[i] : 0));
5268         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5269                 write_op(tp, (cpu_scratch_base +
5270                               (info->data_base & 0xffff) +
5271                               (i * sizeof(u32))),
5272                          (info->data_data ?
5273                           info->data_data[i] : 0));
5274
5275         err = 0;
5276
5277 out:
5278         return err;
5279 }
5280
5281 /* tp->lock is held. */
5282 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5283 {
5284         struct fw_info info;
5285         int err, i;
5286
5287         info.text_base = TG3_FW_TEXT_ADDR;
5288         info.text_len = TG3_FW_TEXT_LEN;
5289         info.text_data = &tg3FwText[0];
5290         info.rodata_base = TG3_FW_RODATA_ADDR;
5291         info.rodata_len = TG3_FW_RODATA_LEN;
5292         info.rodata_data = &tg3FwRodata[0];
5293         info.data_base = TG3_FW_DATA_ADDR;
5294         info.data_len = TG3_FW_DATA_LEN;
5295         info.data_data = NULL;
5296
5297         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5298                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5299                                     &info);
5300         if (err)
5301                 return err;
5302
5303         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5304                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5305                                     &info);
5306         if (err)
5307                 return err;
5308
5309         /* Now startup only the RX cpu. */
5310         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5311         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5312
5313         for (i = 0; i < 5; i++) {
5314                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5315                         break;
5316                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5317                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5318                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5319                 udelay(1000);
5320         }
5321         if (i >= 5) {
5322                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5323                        "to set RX CPU PC, is %08x should be %08x\n",
5324                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5325                        TG3_FW_TEXT_ADDR);
5326                 return -ENODEV;
5327         }
5328         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5329         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5330
5331         return 0;
5332 }
5333
#if TG3_TSO_SUPPORT != 0

/* Link-map constants for the bundled TSO firmware image (release
 * 1.6.0 per the *_RELEASE_* values).  Layout mirrors the TG3_FW_*
 * constants above.  NOTE(review): "RELASE" is the same historical
 * typo as in TG3_FW_RELASE_MINOR; the name is kept so any existing
 * references continue to compile.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5350
/* .text section of the TSO firmware image: opaque 32-bit machine-code
 * words loaded to the chip's internal CPU at TG3_TSO_FW_TEXT_ADDR.
 * Presumably MIPS instructions (the recurring 0x27bdffe0/0xafbf00xx ...
 * 0x03e00008/0x27bd00xx patterns look like MIPS stack-frame prologues and
 * jr-ra epilogues) -- TODO confirm against the loader out of view.
 * Sized (TG3_TSO_FW_TEXT_LEN / 4) + 1 words.  Do NOT edit by hand: this
 * is a verbatim firmware blob (see the copyright notice at the top of
 * the file) and any change corrupts the image.
 */
5351 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5352         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5353         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5354         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5355         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5356         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5357         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5358         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5359         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5360         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5361         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5362         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5363         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5364         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5365         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5366         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5367         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5368         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5369         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5370         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5371         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5372         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5373         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5374         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5375         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5376         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5377         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5378         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5379         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5380         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5381         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5382         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5383         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5384         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5385         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5386         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5387         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5388         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5389         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5390         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5391         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5392         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5393         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5394         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5395         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5396         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5397         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5398         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5399         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5400         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5401         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5402         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5403         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5404         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5405         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5406         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5407         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5408         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5409         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5410         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5411         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5412         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5413         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5414         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5415         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5416         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5417         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5418         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5419         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5420         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5421         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5422         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5423         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5424         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5425         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5426         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5427         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5428         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5429         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5430         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5431         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5432         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5433         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5434         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5435         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5436         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5437         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5438         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5439         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5440         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5441         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5442         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5443         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5444         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5445         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5446         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5447         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5448         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5449         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5450         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5451         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5452         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5453         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5454         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5455         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5456         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5457         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5458         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5459         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5460         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5461         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5462         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5463         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5464         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5465         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5466         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5467         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5468         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5469         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5470         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5471         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5472         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5473         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5474         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5475         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5476         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5477         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5478         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5479         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5480         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5481         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5482         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5483         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5484         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5485         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5486         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5487         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5488         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5489         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5490         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5491         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5492         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5493         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5494         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5495         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5496         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5497         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5498         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5499         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5500         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5501         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5502         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5503         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5504         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5505         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5506         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5507         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5508         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5509         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5510         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5511         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5512         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5513         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5514         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5515         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5516         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5517         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5518         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5519         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5520         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5521         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5522         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5523         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5524         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5525         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5526         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5527         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5528         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5529         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5530         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5531         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5532         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5533         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5534         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5535         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5536         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5537         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5538         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5539         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5540         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5541         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5542         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5543         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5544         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5545         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5546         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5547         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5548         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5549         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5550         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5551         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5552         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5553         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5554         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5555         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5556         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5557         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5558         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5559         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5560         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5561         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5562         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5563         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5564         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5565         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5566         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5567         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5568         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5569         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5570         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5571         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5572         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5573         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5574         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5575         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5576         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5577         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5578         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5579         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5580         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5581         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5582         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5583         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5584         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5585         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5586         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5587         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5588         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5589         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5590         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5591         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5592         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5593         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5594         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5595         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5596         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5597         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5598         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5599         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5600         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5601         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5602         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5603         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5604         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5605         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5606         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5607         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5608         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5609         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5610         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5611         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5612         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5613         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5614         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5615         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5616         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5617         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5618         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5619         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5620         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5621         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5622         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5623         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5624         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5625         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5626         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5627         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5628         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5629         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5630         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5631         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5632         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5633         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5634         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5635         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5636 };
5637
/* .rodata section of the TSO firmware (TG3_TSO_FW_RODATA_LEN bytes, loaded
 * at TG3_TSO_FW_RODATA_ADDR).  The words decode as big-endian ASCII tags,
 * e.g. 0x4d61696e 0x43707542 = "MainCpuB", 0x73746b6f 0x66666c64 =
 * "stkoffld", 0x53774576 0x656e7430 = "SwEvent0", 0x66617461 0x6c457272 =
 * "fatalErr" -- presumably message strings used by the firmware itself.
 * Verbatim firmware data: do not edit.
 */
5638 static u32 tg3TsoFwRodata[] = {
5639         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5640         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5641         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5642         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5643         0x00000000,
5644 };
5645
/* .data section of the TSO firmware (TG3_TSO_FW_DATA_LEN bytes, loaded at
 * TG3_TSO_FW_DATA_ADDR).  Words 1-4 decode as the big-endian ASCII string
 * "stkoffld_v1.6.0", matching TG3_TSO_FW_RELEASE_MAJOR/RELASE_MINOR/
 * RELEASE_FIX (1.6.0) above.  Verbatim firmware data: do not edit.
 */
5646 static u32 tg3TsoFwData[] = {
5647         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5648         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5649         0x00000000,
5650 };
5651
5652 /* 5705 needs a special version of the TSO firmware.  */
/* Image layout for the 5705-specific TSO firmware: release version plus
 * the on-chip load address and byte length of each section.  Unlike the
 * 5700-family image above (based at 0x08000000), this one is linked at
 * 0x00010000.  Must stay in sync with the tg3Tso5Fw* arrays below.
 */
5653 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5654 #define TG3_TSO5_FW_RELASE_MINOR        0x2	/* sic: same "RELASE" typo as the TSO_FW variant -- kept for any out-of-view users */
5655 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5656 #define TG3_TSO5_FW_START_ADDR          0x00010000	/* entry point == start of .text */
5657 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5658 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5659 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5660 #define TG3_TSO5_FW_RODATA_LEN          0x50
5661 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5662 #define TG3_TSO5_FW_DATA_LEN            0x20
5663 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5664 #define TG3_TSO5_FW_SBSS_LEN            0x28
5665 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5666 #define TG3_TSO5_FW_BSS_LEN             0x88
5667
5668 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5669         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5670         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5671         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5672         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5673         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5674         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5675         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5676         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5677         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5678         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5679         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5680         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5681         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5682         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5683         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5684         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5685         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5686         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5687         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5688         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5689         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5690         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5691         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5692         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5693         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5694         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5695         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5696         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5697         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5698         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5699         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5700         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5701         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5702         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5703         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5704         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5705         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5706         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5707         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5708         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5709         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5710         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5711         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5712         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5713         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5714         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5715         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5716         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5717         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5718         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5719         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5720         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5721         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5722         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5723         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5724         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5725         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5726         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5727         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5728         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5729         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5730         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5731         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5732         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5733         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5734         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5735         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5736         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5737         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5738         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5739         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5740         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5741         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5742         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5743         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5744         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5745         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5746         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5747         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5748         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5749         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5750         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5751         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5752         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5753         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5754         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5755         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5756         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5757         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5758         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5759         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5760         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5761         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5762         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5763         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5764         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5765         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5766         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5767         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5768         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5769         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5770         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5771         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5772         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5773         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5774         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5775         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5776         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5777         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5778         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5779         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5780         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5781         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5782         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5783         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5784         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5785         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5786         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5787         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5788         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5789         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5790         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5791         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5792         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5793         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5794         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5795         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5796         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5797         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5798         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5799         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5800         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5801         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5802         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5803         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5804         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5805         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5806         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5807         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5808         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5809         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5810         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5811         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5812         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5813         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5814         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5815         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5816         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5817         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5818         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5819         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5820         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5821         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5822         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5823         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5824         0x00000000, 0x00000000, 0x00000000,
5825 };
5826
/* Read-only data segment of the 5705 TSO firmware image.  The non-zero
 * words are ASCII tags embedded by the firmware build ("Main CpuB",
 * "Main CpuA", "stkoffld", "fatalErr").  tg3_load_tso_firmware() copies
 * this table verbatim into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR; the
 * "+ 1" pads the array with one extra zero word past the segment length.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5833
/* Initialized data segment of the 5705 TSO firmware image; the ASCII
 * words spell the firmware version tag "stkoffld_v1.2.0".  Loaded into
 * NIC SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware(); the
 * "+ 1" pads the array with one extra zero word past the segment length.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5838
/* tp->lock is held.
 *
 * Download the TSO firmware image into the NIC and start the on-chip CPU
 * that will run it.  Chips with hardware TSO (TG3_FLG2_HW_TSO) need no
 * download.  On 5705 parts the firmware runs on the RX CPU and its
 * scratch area is carved out of the mbuf pool; on other TSO-capable
 * parts it runs on the TX CPU's dedicated scratch window.
 *
 * Returns 0 on success, a negative errno propagated from
 * tg3_load_firmware_cpu(), or -ENODEV if the CPU never accepts the new
 * program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips segment in silicon; nothing to load. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: TSO5 image on the RX CPU.  The scratch size must
		 * cover every firmware segment (text/rodata/data/sbss/bss)
		 * since it is taken from the mbuf pool base.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other TSO-capable chips: standard TSO image on the
		 * TX CPU, using its fixed scratch base/size.
		 */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu: clear CPU state and point the program
	 * counter at the firmware entry point (posted with tw32_f).
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the PC write took by reading it back; retry up to five
	 * times, halting the CPU before each retry, with a 1 ms pause.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear all CPU mode bits (drops CPU_MODE_HALT) so the CPU runs. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5910
5911 #endif /* TG3_TSO_SUPPORT != 0 */
5912
/* tp->lock is held.
 *
 * Program the chip's MAC address registers from tp->dev->dev_addr.
 * All four MAC_ADDR_{0..3} high/low register pairs receive the same
 * address; 5703/5704 parts also have 12 extended address slots which
 * are filled with the same value.  Finally the TX backoff seed register
 * is written with a value derived from the address bytes.
 */
static void __tg3_set_mac_addr(struct tg3 *tp)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address: bytes 0-1 go in the high register,
	 * bytes 2-5 in the low register.
	 */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the transmit backoff engine with the sum of the address
	 * bytes, masked to the seed field width (addr_high is reused as
	 * a scratch variable here).
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
5947
5948 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5949 {
5950         struct tg3 *tp = netdev_priv(dev);
5951         struct sockaddr *addr = p;
5952         int err = 0;
5953
5954         if (!is_valid_ether_addr(addr->sa_data))
5955                 return -EINVAL;
5956
5957         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5958
5959         if (!netif_running(dev))
5960                 return 0;
5961
5962         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5963                 /* Reset chip so that ASF can re-init any MAC addresses it
5964                  * needs.
5965                  */
5966                 tg3_netif_stop(tp);
5967                 tg3_full_lock(tp, 1);
5968
5969                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5970                 err = tg3_restart_hw(tp, 0);
5971                 if (!err)
5972                         tg3_netif_start(tp);
5973                 tg3_full_unlock(tp);
5974         } else {
5975                 spin_lock_bh(&tp->lock);
5976                 __tg3_set_mac_addr(tp);
5977                 spin_unlock_bh(&tp->lock);
5978         }
5979
5980         return err;
5981 }
5982
/* tp->lock is held.
 *
 * Fill in one TG3_BDINFO descriptor-ring control block in NIC memory:
 * the 64-bit host DMA address of the ring (high word, then low word),
 * the combined maxlen/flags word, and the ring's NIC SRAM address.
 * The NIC_ADDR write is skipped on 5705-plus chips.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	/* 5705-plus parts do not take a NIC_ADDR programming. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
6003
6004 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from an ethtool_coalesce
 * configuration.  The per-IRQ tick registers and the statistics
 * coalescing tick register are only written on pre-5705 chips; the
 * remaining registers are written unconditionally.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
	}
	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		/* Disable stats coalescing ticks while the link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
6026
6027 /* tp->lock is held. */
6028 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6029 {
6030         u32 val, rdmac_mode;
6031         int i, err, limit;
6032
6033         tg3_disable_ints(tp);
6034
6035         tg3_stop_fw(tp);
6036
6037         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6038
6039         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6040                 tg3_abort_hw(tp, 1);
6041         }
6042
6043         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
6044                 tg3_phy_reset(tp);
6045
6046         err = tg3_chip_reset(tp);
6047         if (err)
6048                 return err;
6049
6050         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6051
6052         /* This works around an issue with Athlon chipsets on
6053          * B3 tigon3 silicon.  This bit has no effect on any
6054          * other revision.  But do not set this on PCI Express
6055          * chips.
6056          */
6057         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6058                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6059         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6060
6061         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6062             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6063                 val = tr32(TG3PCI_PCISTATE);
6064                 val |= PCISTATE_RETRY_SAME_DMA;
6065                 tw32(TG3PCI_PCISTATE, val);
6066         }
6067
6068         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6069                 /* Enable some hw fixes.  */
6070                 val = tr32(TG3PCI_MSI_DATA);
6071                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6072                 tw32(TG3PCI_MSI_DATA, val);
6073         }
6074
6075         /* Descriptor ring init may make accesses to the
6076          * NIC SRAM area to setup the TX descriptors, so we
6077          * can only do this after the hardware has been
6078          * successfully reset.
6079          */
6080         err = tg3_init_rings(tp);
6081         if (err)
6082                 return err;
6083
6084         /* This value is determined during the probe time DMA
6085          * engine test, tg3_test_dma.
6086          */
6087         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6088
6089         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6090                           GRC_MODE_4X_NIC_SEND_RINGS |
6091                           GRC_MODE_NO_TX_PHDR_CSUM |
6092                           GRC_MODE_NO_RX_PHDR_CSUM);
6093         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6094
6095         /* Pseudo-header checksum is done by hardware logic and not
6096          * the offload processers, so make the chip do the pseudo-
6097          * header checksums on receive.  For transmit it is more
6098          * convenient to do the pseudo-header checksum in software
6099          * as Linux does that on transmit for us in all cases.
6100          */
6101         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6102
6103         tw32(GRC_MODE,
6104              tp->grc_mode |
6105              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6106
6107         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6108         val = tr32(GRC_MISC_CFG);
6109         val &= ~0xff;
6110         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6111         tw32(GRC_MISC_CFG, val);
6112
6113         /* Initialize MBUF/DESC pool. */
6114         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6115                 /* Do nothing.  */
6116         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6117                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6118                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6119                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6120                 else
6121                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6122                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6123                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6124         }
6125 #if TG3_TSO_SUPPORT != 0
6126         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6127                 int fw_len;
6128
6129                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6130                           TG3_TSO5_FW_RODATA_LEN +
6131                           TG3_TSO5_FW_DATA_LEN +
6132                           TG3_TSO5_FW_SBSS_LEN +
6133                           TG3_TSO5_FW_BSS_LEN);
6134                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6135                 tw32(BUFMGR_MB_POOL_ADDR,
6136                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6137                 tw32(BUFMGR_MB_POOL_SIZE,
6138                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6139         }
6140 #endif
6141
6142         if (tp->dev->mtu <= ETH_DATA_LEN) {
6143                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6144                      tp->bufmgr_config.mbuf_read_dma_low_water);
6145                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6146                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6147                 tw32(BUFMGR_MB_HIGH_WATER,
6148                      tp->bufmgr_config.mbuf_high_water);
6149         } else {
6150                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6151                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6152                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6153                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6154                 tw32(BUFMGR_MB_HIGH_WATER,
6155                      tp->bufmgr_config.mbuf_high_water_jumbo);
6156         }
6157         tw32(BUFMGR_DMA_LOW_WATER,
6158              tp->bufmgr_config.dma_low_water);
6159         tw32(BUFMGR_DMA_HIGH_WATER,
6160              tp->bufmgr_config.dma_high_water);
6161
6162         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6163         for (i = 0; i < 2000; i++) {
6164                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6165                         break;
6166                 udelay(10);
6167         }
6168         if (i >= 2000) {
6169                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6170                        tp->dev->name);
6171                 return -ENODEV;
6172         }
6173
6174         /* Setup replenish threshold. */
6175         val = tp->rx_pending / 8;
6176         if (val == 0)
6177                 val = 1;
6178         else if (val > tp->rx_std_max_post)
6179                 val = tp->rx_std_max_post;
6180
6181         tw32(RCVBDI_STD_THRESH, val);
6182
6183         /* Initialize TG3_BDINFO's at:
6184          *  RCVDBDI_STD_BD:     standard eth size rx ring
6185          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6186          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6187          *
6188          * like so:
6189          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6190          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6191          *                              ring attribute flags
6192          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6193          *
6194          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6195          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6196          *
6197          * The size of each ring is fixed in the firmware, but the location is
6198          * configurable.
6199          */
6200         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6201              ((u64) tp->rx_std_mapping >> 32));
6202         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6203              ((u64) tp->rx_std_mapping & 0xffffffff));
6204         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6205              NIC_SRAM_RX_BUFFER_DESC);
6206
6207         /* Don't even try to program the JUMBO/MINI buffer descriptor
6208          * configs on 5705.
6209          */
6210         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6211                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6212                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6213         } else {
6214                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6215                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6216
6217                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6218                      BDINFO_FLAGS_DISABLED);
6219
6220                 /* Setup replenish threshold. */
6221                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6222
6223                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6224                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6225                              ((u64) tp->rx_jumbo_mapping >> 32));
6226                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6227                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6228                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6229                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6231                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6232                 } else {
6233                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6234                              BDINFO_FLAGS_DISABLED);
6235                 }
6236
6237         }
6238
6239         /* There is only one send ring on 5705/5750, no need to explicitly
6240          * disable the others.
6241          */
6242         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6243                 /* Clear out send RCB ring in SRAM. */
6244                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6245                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6246                                       BDINFO_FLAGS_DISABLED);
6247         }
6248
6249         tp->tx_prod = 0;
6250         tp->tx_cons = 0;
6251         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6252         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6253
6254         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6255                        tp->tx_desc_mapping,
6256                        (TG3_TX_RING_SIZE <<
6257                         BDINFO_FLAGS_MAXLEN_SHIFT),
6258                        NIC_SRAM_TX_BUFFER_DESC);
6259
6260         /* There is only one receive return ring on 5705/5750, no need
6261          * to explicitly disable the others.
6262          */
6263         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6264                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6265                      i += TG3_BDINFO_SIZE) {
6266                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6267                                       BDINFO_FLAGS_DISABLED);
6268                 }
6269         }
6270
6271         tp->rx_rcb_ptr = 0;
6272         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6273
6274         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6275                        tp->rx_rcb_mapping,
6276                        (TG3_RX_RCB_RING_SIZE(tp) <<
6277                         BDINFO_FLAGS_MAXLEN_SHIFT),
6278                        0);
6279
6280         tp->rx_std_ptr = tp->rx_pending;
6281         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6282                      tp->rx_std_ptr);
6283
6284         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6285                                                 tp->rx_jumbo_pending : 0;
6286         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6287                      tp->rx_jumbo_ptr);
6288
6289         /* Initialize MAC address and backoff seed. */
6290         __tg3_set_mac_addr(tp);
6291
6292         /* MTU + ethernet header + FCS + optional VLAN tag */
6293         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6294
6295         /* The slot time is changed by tg3_setup_phy if we
6296          * run at gigabit with half duplex.
6297          */
6298         tw32(MAC_TX_LENGTHS,
6299              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6300              (6 << TX_LENGTHS_IPG_SHIFT) |
6301              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6302
6303         /* Receive rules. */
6304         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6305         tw32(RCVLPC_CONFIG, 0x0181);
6306
6307         /* Calculate RDMAC_MODE setting early, we need it to determine
6308          * the RCVLPC_STATE_ENABLE mask.
6309          */
6310         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6311                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6312                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6313                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6314                       RDMAC_MODE_LNGREAD_ENAB);
6315         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6316                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6317
6318         /* If statement applies to 5705 and 5750 PCI devices only */
6319         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6320              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6321             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6322                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6323                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6324                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6325                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6326                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6327                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6328                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6329                 }
6330         }
6331
6332         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6333                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6334
6335 #if TG3_TSO_SUPPORT != 0
6336         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6337                 rdmac_mode |= (1 << 27);
6338 #endif
6339
6340         /* Receive/send statistics. */
6341         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6342                 val = tr32(RCVLPC_STATS_ENABLE);
6343                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6344                 tw32(RCVLPC_STATS_ENABLE, val);
6345         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6346                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6347                 val = tr32(RCVLPC_STATS_ENABLE);
6348                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6349                 tw32(RCVLPC_STATS_ENABLE, val);
6350         } else {
6351                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6352         }
6353         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6354         tw32(SNDDATAI_STATSENAB, 0xffffff);
6355         tw32(SNDDATAI_STATSCTRL,
6356              (SNDDATAI_SCTRL_ENABLE |
6357               SNDDATAI_SCTRL_FASTUPD));
6358
6359         /* Setup host coalescing engine. */
6360         tw32(HOSTCC_MODE, 0);
6361         for (i = 0; i < 2000; i++) {
6362                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6363                         break;
6364                 udelay(10);
6365         }
6366
6367         __tg3_set_coalesce(tp, &tp->coal);
6368
6369         /* set status block DMA address */
6370         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6371              ((u64) tp->status_mapping >> 32));
6372         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6373              ((u64) tp->status_mapping & 0xffffffff));
6374
6375         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6376                 /* Status/statistics block address.  See tg3_timer,
6377                  * the tg3_periodic_fetch_stats call there, and
6378                  * tg3_get_stats to see how this works for 5705/5750 chips.
6379                  */
6380                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6381                      ((u64) tp->stats_mapping >> 32));
6382                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6383                      ((u64) tp->stats_mapping & 0xffffffff));
6384                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6385                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6386         }
6387
6388         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6389
6390         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6391         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6392         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6393                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6394
6395         /* Clear statistics/status block in chip, and status block in ram. */
6396         for (i = NIC_SRAM_STATS_BLK;
6397              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6398              i += sizeof(u32)) {
6399                 tg3_write_mem(tp, i, 0);
6400                 udelay(40);
6401         }
6402         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6403
6404         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6405                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6406                 /* reset to prevent losing 1st rx packet intermittently */
6407                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6408                 udelay(10);
6409         }
6410
6411         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6412                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6413         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6414         udelay(40);
6415
6416         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6417          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6418          * register to preserve the GPIO settings for LOMs. The GPIOs,
6419          * whether used as inputs or outputs, are set by boot code after
6420          * reset.
6421          */
6422         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6423                 u32 gpio_mask;
6424
6425                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6426                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6427
6428                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6429                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6430                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6431
6432                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6433                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6434
6435                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6436
6437                 /* GPIO1 must be driven high for eeprom write protect */
6438                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6439                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6440         }
6441         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6442         udelay(100);
6443
6444         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6445         tp->last_tag = 0;
6446
6447         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6448                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6449                 udelay(40);
6450         }
6451
6452         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6453                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6454                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6455                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6456                WDMAC_MODE_LNGREAD_ENAB);
6457
6458         /* If statement applies to 5705 and 5750 PCI devices only */
6459         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6460              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6461             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6462                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6463                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6464                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6465                         /* nothing */
6466                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6467                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6468                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6469                         val |= WDMAC_MODE_RX_ACCEL;
6470                 }
6471         }
6472
6473         /* Enable host coalescing bug fix */
6474         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6475             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6476                 val |= (1 << 29);
6477
6478         tw32_f(WDMAC_MODE, val);
6479         udelay(40);
6480
6481         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6482                 val = tr32(TG3PCI_X_CAPS);
6483                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6484                         val &= ~PCIX_CAPS_BURST_MASK;
6485                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6486                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6487                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6488                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6489                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6490                                 val |= (tp->split_mode_max_reqs <<
6491                                         PCIX_CAPS_SPLIT_SHIFT);
6492                 }
6493                 tw32(TG3PCI_X_CAPS, val);
6494         }
6495
6496         tw32_f(RDMAC_MODE, rdmac_mode);
6497         udelay(40);
6498
6499         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6500         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6501                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6502         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6503         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6504         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6505         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6506         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6507 #if TG3_TSO_SUPPORT != 0
6508         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6509                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6510 #endif
6511         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6512         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6513
6514         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6515                 err = tg3_load_5701_a0_firmware_fix(tp);
6516                 if (err)
6517                         return err;
6518         }
6519
6520 #if TG3_TSO_SUPPORT != 0
6521         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6522                 err = tg3_load_tso_firmware(tp);
6523                 if (err)
6524                         return err;
6525         }
6526 #endif
6527
6528         tp->tx_mode = TX_MODE_ENABLE;
6529         tw32_f(MAC_TX_MODE, tp->tx_mode);
6530         udelay(100);
6531
6532         tp->rx_mode = RX_MODE_ENABLE;
6533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6534                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6535
6536         tw32_f(MAC_RX_MODE, tp->rx_mode);
6537         udelay(10);
6538
6539         if (tp->link_config.phy_is_low_power) {
6540                 tp->link_config.phy_is_low_power = 0;
6541                 tp->link_config.speed = tp->link_config.orig_speed;
6542                 tp->link_config.duplex = tp->link_config.orig_duplex;
6543                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6544         }
6545
6546         tp->mi_mode = MAC_MI_MODE_BASE;
6547         tw32_f(MAC_MI_MODE, tp->mi_mode);
6548         udelay(80);
6549
6550         tw32(MAC_LED_CTRL, tp->led_ctrl);
6551
6552         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6553         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6554                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6555                 udelay(10);
6556         }
6557         tw32_f(MAC_RX_MODE, tp->rx_mode);
6558         udelay(10);
6559
6560         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6561                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6562                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6563                         /* Set drive transmission level to 1.2V  */
6564                         /* only if the signal pre-emphasis bit is not set  */
6565                         val = tr32(MAC_SERDES_CFG);
6566                         val &= 0xfffff000;
6567                         val |= 0x880;
6568                         tw32(MAC_SERDES_CFG, val);
6569                 }
6570                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6571                         tw32(MAC_SERDES_CFG, 0x616000);
6572         }
6573
6574         /* Prevent chip from dropping frames when flow control
6575          * is enabled.
6576          */
6577         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6578
6579         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6580             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6581                 /* Use hardware link auto-negotiation */
6582                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6583         }
6584
6585         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6586             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6587                 u32 tmp;
6588
6589                 tmp = tr32(SERDES_RX_CTRL);
6590                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6591                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6592                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6593                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6594         }
6595
6596         err = tg3_setup_phy(tp, reset_phy);
6597         if (err)
6598                 return err;
6599
6600         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6601                 u32 tmp;
6602
6603                 /* Clear CRC stats. */
6604                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6605                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6606                         tg3_readphy(tp, 0x14, &tmp);
6607                 }
6608         }
6609
6610         __tg3_set_rx_mode(tp->dev);
6611
6612         /* Initialize receive rules. */
6613         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6614         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6615         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6616         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6617
6618         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6619             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6620                 limit = 8;
6621         else
6622                 limit = 16;
6623         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6624                 limit -= 4;
6625         switch (limit) {
6626         case 16:
6627                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6628         case 15:
6629                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6630         case 14:
6631                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6632         case 13:
6633                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6634         case 12:
6635                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6636         case 11:
6637                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6638         case 10:
6639                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6640         case 9:
6641                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6642         case 8:
6643                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6644         case 7:
6645                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6646         case 6:
6647                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6648         case 5:
6649                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6650         case 4:
6651                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6652         case 3:
6653                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6654         case 2:
6655         case 1:
6656
6657         default:
6658                 break;
6659         };
6660
6661         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6662
6663         return 0;
6664 }
6665
6666 /* Called at device open time to get the chip ready for
6667  * packet processing.  Invoked with tp->lock held.
6668  */
6669 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6670 {
6671         int err;
6672
6673         /* Force the chip into D0. */
6674         err = tg3_set_power_state(tp, PCI_D0);
6675         if (err)
6676                 goto out;
6677
6678         tg3_switch_clocks(tp);
6679
6680         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6681
6682         err = tg3_reset_hw(tp, reset_phy);
6683
6684 out:
6685         return err;
6686 }
6687
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * (high:low) statistics slot PSTAT.  The on-chip counters are only
 * 32 bits wide; if the unsigned addition into ->low wraps (result
 * smaller than the addend), carry one into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6694
/* Fold the on-chip MAC and receive-list-placement statistics counters
 * into the host-resident tg3_hw_stats block via TG3_STAT_ADD32.
 * Called once per second from tg3_timer (under tp->lock) on chips
 * that do not DMA their statistics block to host memory — see the
 * 5705/5750 note in tg3_reset_hw.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* No carrier: counters cannot advance, nothing to accumulate. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6735
/* Periodic driver timer (fires every tp->timer_offset jiffies).
 * Works around non-tagged interrupt races, accumulates statistics,
 * polls link state, and sends the ASF heartbeat.  Re-arms itself
 * at the end unless a chip reset has been scheduled.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being quiesced (irq_sync): skip all hardware
	 * access this tick and just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* A status block is pending: force the interrupt
			 * line to assert so the handler services it.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick the coalescing engine to DMA a
			 * fresh status block to the host now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write-DMA engine dropped its enable bit the
		 * chip is wedged; schedule a full reset from process
		 * context and bail without re-arming (reset_task will
		 * restart the timer via TG3_FLG2_RESTART_TIMER).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		/* 5705+ chips: pull statistics off the chip by hand. */
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for a link/PHY event instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the link state changed... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ...or link was down and sync/signal appeared. */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before
				 * renegotiating the link.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.  */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive. */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE_DETECT);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Raise the RX CPU event to deliver the command. */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6837
6838 static int tg3_request_irq(struct tg3 *tp)
6839 {
6840         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6841         unsigned long flags;
6842         struct net_device *dev = tp->dev;
6843
6844         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6845                 fn = tg3_msi;
6846                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6847                         fn = tg3_msi_1shot;
6848                 flags = IRQF_SAMPLE_RANDOM;
6849         } else {
6850                 fn = tg3_interrupt;
6851                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6852                         fn = tg3_interrupt_tagged;
6853                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6854         }
6855         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6856 }
6857
/* Verify that the chip can actually deliver an interrupt to the host.
 * Temporarily swaps in the minimal tg3_test_isr handler, forces the
 * coalescing engine to raise an interrupt, and polls the interrupt
 * mailbox for up to ~50ms to see whether the ISR acknowledged it.
 * Restores the normal handler before returning.
 *
 * Returns 0 if the interrupt arrived, -EIO if it did not, -ENODEV if
 * the device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Replace the production handler with the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll the interrupt mailbox; tg3_test_isr writes a non-zero
	 * value there when the interrupt is serviced.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;

#if (LINUX_VERSION_CODE < 0x20607)
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(10);
#else
		msleep(10);
#endif
	}

	tg3_disable_ints(tp);

	/* Put the normal interrupt handler back. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6910
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Some chipsets cannot route MSI messages; this runs tg3_test_interrupt
 * with SERR reporting masked (an MSI may terminate with Master Abort),
 * and on -EIO falls back to legacy INTx, re-requesting the IRQ and
 * fully resetting the chip to clear any Master Abort damage.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when MSI is not in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR bit included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
	pci_disable_msi(tp->pdev);
#endif

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* Hardware reinit failed: release the IRQ; the caller treats a
	 * non-zero return as an unrecoverable open failure.
	 */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6973
6974 static int tg3_open(struct net_device *dev)
6975 {
6976         struct tg3 *tp = netdev_priv(dev);
6977         int err;
6978
6979         tg3_full_lock(tp, 0);
6980
6981         err = tg3_set_power_state(tp, PCI_D0);
6982         if (err)
6983                 return err;
6984
6985         tg3_disable_ints(tp);
6986         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6987
6988         tg3_full_unlock(tp);
6989
6990         /* The placement of this call is tied
6991          * to the setup and use of Host TX descriptors.
6992          */
6993         err = tg3_alloc_consistent(tp);
6994         if (err)
6995                 return err;
6996
6997 #ifdef CONFIG_PCI_MSI
6998         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6999             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
7000             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
7001             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
7002               (tp->pdev_peer == tp->pdev))) {
7003                 /* All MSI supporting chips should support tagged
7004                  * status.  Assert that this is the case.
7005                  */
7006                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7007                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7008                                "Not using MSI.\n", tp->dev->name);
7009                 } else if (pci_enable_msi(tp->pdev) == 0) {
7010                         u32 msi_mode;
7011
7012                         msi_mode = tr32(MSGINT_MODE);
7013                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7014                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7015                 }
7016         }
7017 #endif
7018         err = tg3_request_irq(tp);
7019
7020         if (err) {
7021                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7022 #ifdef CONFIG_PCI_MSI
7023                         pci_disable_msi(tp->pdev);
7024 #endif
7025                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7026                 }
7027                 tg3_free_consistent(tp);
7028                 return err;
7029         }
7030
7031         tg3_full_lock(tp, 0);
7032
7033         err = tg3_init_hw(tp, 1);
7034         if (err) {
7035                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7036                 tg3_free_rings(tp);
7037         } else {
7038                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7039                         tp->timer_offset = HZ;
7040                 else
7041                         tp->timer_offset = HZ / 10;
7042
7043                 BUG_ON(tp->timer_offset > HZ);
7044                 tp->timer_counter = tp->timer_multiplier =
7045                         (HZ / tp->timer_offset);
7046                 tp->asf_counter = tp->asf_multiplier =
7047                         ((HZ / tp->timer_offset) * 2);
7048
7049                 init_timer(&tp->timer);
7050                 tp->timer.expires = jiffies + tp->timer_offset;
7051                 tp->timer.data = (unsigned long) tp;
7052                 tp->timer.function = tg3_timer;
7053         }
7054
7055         tg3_full_unlock(tp);
7056
7057         if (err) {
7058                 free_irq(tp->pdev->irq, dev);
7059                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7060 #ifdef CONFIG_PCI_MSI
7061                         pci_disable_msi(tp->pdev);
7062 #endif
7063                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7064                 }
7065                 tg3_free_consistent(tp);
7066                 return err;
7067         }
7068
7069         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7070                 err = tg3_test_msi(tp);
7071
7072                 if (err) {
7073                         tg3_full_lock(tp, 0);
7074
7075                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7076 #ifdef CONFIG_PCI_MSI
7077                                 pci_disable_msi(tp->pdev);
7078 #endif
7079                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7080                         }
7081                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7082                         tg3_free_rings(tp);
7083                         tg3_free_consistent(tp);
7084
7085                         tg3_full_unlock(tp);
7086
7087                         return err;
7088                 }
7089
7090                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7091                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7092                                 u32 val = tr32(0x7c04);
7093
7094                                 tw32(0x7c04, val | (1 << 29));
7095                         }
7096                 }
7097         }
7098
7099         tg3_full_lock(tp, 0);
7100
7101         add_timer(&tp->timer);
7102         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7103         tg3_enable_ints(tp);
7104
7105         tg3_full_unlock(tp);
7106
7107         netif_start_queue(dev);
7108
7109         return 0;
7110 }
7111
7112 #if 0
7113 /*static*/ void tg3_dump_state(struct tg3 *tp)
7114 {
7115         u32 val32, val32_2, val32_3, val32_4, val32_5;
7116         u16 val16;
7117         int i;
7118
7119         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7120         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7121         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7122                val16, val32);
7123
7124         /* MAC block */
7125         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7126                tr32(MAC_MODE), tr32(MAC_STATUS));
7127         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7128                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7129         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7130                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7131         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7132                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7133
7134         /* Send data initiator control block */
7135         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7136                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7137         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7138                tr32(SNDDATAI_STATSCTRL));
7139
7140         /* Send data completion control block */
7141         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7142
7143         /* Send BD ring selector block */
7144         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7145                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7146
7147         /* Send BD initiator control block */
7148         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7149                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7150
7151         /* Send BD completion control block */
7152         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7153
7154         /* Receive list placement control block */
7155         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7156                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7157         printk("       RCVLPC_STATSCTRL[%08x]\n",
7158                tr32(RCVLPC_STATSCTRL));
7159
7160         /* Receive data and receive BD initiator control block */
7161         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7162                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7163
7164         /* Receive data completion control block */
7165         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7166                tr32(RCVDCC_MODE));
7167
7168         /* Receive BD initiator control block */
7169         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7170                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7171
7172         /* Receive BD completion control block */
7173         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7174                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7175
7176         /* Receive list selector control block */
7177         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7178                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7179
7180         /* Mbuf cluster free block */
7181         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7182                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7183
7184         /* Host coalescing control block */
7185         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7186                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7187         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7188                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7189                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7190         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7191                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7192                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7193         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7194                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7195         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7196                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7197
7198         /* Memory arbiter control block */
7199         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7200                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7201
7202         /* Buffer manager control block */
7203         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7204                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7205         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7206                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7207         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7208                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7209                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7210                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7211
7212         /* Read DMA control block */
7213         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7214                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7215
7216         /* Write DMA control block */
7217         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7218                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7219
7220         /* DMA completion block */
7221         printk("DEBUG: DMAC_MODE[%08x]\n",
7222                tr32(DMAC_MODE));
7223
7224         /* GRC block */
7225         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7226                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7227         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7228                tr32(GRC_LOCAL_CTRL));
7229
7230         /* TG3_BDINFOs */
7231         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7232                tr32(RCVDBDI_JUMBO_BD + 0x0),
7233                tr32(RCVDBDI_JUMBO_BD + 0x4),
7234                tr32(RCVDBDI_JUMBO_BD + 0x8),
7235                tr32(RCVDBDI_JUMBO_BD + 0xc));
7236         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7237                tr32(RCVDBDI_STD_BD + 0x0),
7238                tr32(RCVDBDI_STD_BD + 0x4),
7239                tr32(RCVDBDI_STD_BD + 0x8),
7240                tr32(RCVDBDI_STD_BD + 0xc));
7241         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7242                tr32(RCVDBDI_MINI_BD + 0x0),
7243                tr32(RCVDBDI_MINI_BD + 0x4),
7244                tr32(RCVDBDI_MINI_BD + 0x8),
7245                tr32(RCVDBDI_MINI_BD + 0xc));
7246
7247         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7248         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7249         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7250         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7251         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7252                val32, val32_2, val32_3, val32_4);
7253
7254         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7255         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7256         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7257         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7258         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7259                val32, val32_2, val32_3, val32_4);
7260
7261         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7262         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7263         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7264         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7265         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7266         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7267                val32, val32_2, val32_3, val32_4, val32_5);
7268
7269         /* SW status block */
7270         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7271                tp->hw_status->status,
7272                tp->hw_status->status_tag,
7273                tp->hw_status->rx_jumbo_consumer,
7274                tp->hw_status->rx_consumer,
7275                tp->hw_status->rx_mini_consumer,
7276                tp->hw_status->idx[0].rx_producer,
7277                tp->hw_status->idx[0].tx_consumer);
7278
7279         /* SW statistics block */
7280         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7281                ((u32 *)tp->hw_stats)[0],
7282                ((u32 *)tp->hw_stats)[1],
7283                ((u32 *)tp->hw_stats)[2],
7284                ((u32 *)tp->hw_stats)[3]);
7285
7286         /* Mailboxes */
7287         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7288                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7289                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7290                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7291                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7292
7293         /* NIC side send descriptors. */
7294         for (i = 0; i < 6; i++) {
7295                 unsigned long txd;
7296
7297                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7298                         + (i * sizeof(struct tg3_tx_buffer_desc));
7299                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7300                        i,
7301                        readl(txd + 0x0), readl(txd + 0x4),
7302                        readl(txd + 0x8), readl(txd + 0xc));
7303         }
7304
7305         /* NIC side RX descriptors. */
7306         for (i = 0; i < 6; i++) {
7307                 unsigned long rxd;
7308
7309                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7310                         + (i * sizeof(struct tg3_rx_buffer_desc));
7311                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7312                        i,
7313                        readl(rxd + 0x0), readl(rxd + 0x4),
7314                        readl(rxd + 0x8), readl(rxd + 0xc));
7315                 rxd += (4 * sizeof(u32));
7316                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7317                        i,
7318                        readl(rxd + 0x0), readl(rxd + 0x4),
7319                        readl(rxd + 0x8), readl(rxd + 0xc));
7320         }
7321
7322         for (i = 0; i < 6; i++) {
7323                 unsigned long rxd;
7324
7325                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7326                         + (i * sizeof(struct tg3_rx_buffer_desc));
7327                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7328                        i,
7329                        readl(rxd + 0x0), readl(rxd + 0x4),
7330                        readl(rxd + 0x8), readl(rxd + 0xc));
7331                 rxd += (4 * sizeof(u32));
7332                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7333                        i,
7334                        readl(rxd + 0x0), readl(rxd + 0x4),
7335                        readl(rxd + 0x8), readl(rxd + 0xc));
7336         }
7337 }
7338 #endif
7339
7340 static struct net_device_stats *tg3_get_stats(struct net_device *);
7341 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7342
/* net_device close (ifconfig down) handler.
 *
 * Quiesces the chip: waits out any in-flight reset task, stops the TX
 * queue and timer, halts the hardware under the full lock, releases the
 * IRQ/MSI, snapshots the statistics, frees DMA memory and drops the
 * device into D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK) {
#if (LINUX_VERSION_CODE < 0x20607)
		/* msleep() does not exist on older kernels; emulate a
		 * 1-jiffy uninterruptible sleep instead.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
#else
		msleep(1);
#endif
	}

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	/* Full chip reset; '1' asks for a clean shutdown handshake. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
#ifdef CONFIG_PCI_MSI
		pci_disable_msi(tp->pdev);
#endif
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the accumulated counters now: the hw_stats DMA block
	 * is released by tg3_free_consistent() below, and the *_prev
	 * copies seed the totals on the next open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7400
7401 static inline unsigned long get_stat64(tg3_stat64_t *val)
7402 {
7403         unsigned long ret;
7404
7405 #if (BITS_PER_LONG == 32)
7406         ret = val->low;
7407 #else
7408         ret = ((u64)val->high << 32) | ((u64)val->low);
7409 #endif
7410         return ret;
7411 }
7412
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper parts the MAC's rx_fcs_errors counter is not
 * used; instead the count is read out of the PHY (registers 0x1e/0x14
 * — presumably a shadow/test register latch, TODO confirm against the
 * Broadcom PHY documentation) and accumulated in software.  All other
 * configurations read the hardware statistics block directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			/* PHY read failed; count nothing this interval. */
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7437
/* Add the live hardware counter for 'member' to the value that was
 * saved when the interface was last closed (estats_prev).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh tp->estats from the hardware statistics DMA block and return
 * it.  If the stats block is not mapped (device closed), return the
 * totals saved at the last close instead.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Write DMA / receive resource counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Read DMA counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host interface / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7529
/* net_device get_stats handler.
 *
 * Folds the live hardware statistics block into the generic
 * net_device_stats layout, adding the totals that were saved at the
 * last close (net_stats_prev).  If the stats block is not mapped
 * (device closed), the saved totals are returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The MAC keeps separate unicast/multicast/broadcast counts. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7589
7590 static inline u32 calc_crc(unsigned char *buf, int len)
7591 {
7592         u32 reg;
7593         u32 tmp;
7594         int j, k;
7595
7596         reg = 0xffffffff;
7597
7598         for (j = 0; j < len; j++) {
7599                 reg ^= buf[j];
7600
7601                 for (k = 0; k < 8; k++) {
7602                         tmp = reg & 0x01;
7603
7604                         reg >>= 1;
7605
7606                         if (tmp) {
7607                                 reg ^= 0xedb88320;
7608                         }
7609                 }
7610         }
7611
7612         return ~reg;
7613 }
7614
7615 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7616 {
7617         /* accept or reject all multicast frames */
7618         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7619         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7620         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7621         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7622 }
7623
/* Apply the device's RX filtering policy (promiscuous / all-multi /
 * hash-filtered multicast, VLAN tag stripping) to the hardware.
 * NOTE(review): callers appear to hold the full device lock — see
 * tg3_set_rx_mode() below; confirm for any new call site.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	/* Only strip tags in hardware when a VLAN group is registered. */
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Top 7 bits of the inverted CRC select one of
			 * 128 hash-filter bits: bits 6:5 pick the
			 * register, bits 4:0 pick the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7687
/* net_device set_rx_mode entry point: take the full device lock and
 * reprogram the RX filters.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7699
7700 #define TG3_REGDUMP_LEN         (32 * 1024)
7701
/* ethtool get_regs_len handler: the register dump buffer is a fixed
 * 32KB regardless of chip variant.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7706
/* ethtool get_regs handler.
 *
 * Fills '_p' (TG3_REGDUMP_LEN bytes) with selected register ranges,
 * each copied at its natural chip offset within the buffer so the dump
 * mirrors the register map; unread gaps stay zero from the memset.
 * Skipped entirely while the PHY is powered down, since register reads
 * would be unreliable then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at 'base' within the dump, then read 'len'
 * bytes worth of consecutive registers.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Position the cursor at 'reg' and read that single register. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist when flash/NVRAM was detected. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7779
#if (LINUX_VERSION_CODE >= 0x20418)
/* ethtool get_eeprom_len handler: report the NVRAM size that was
 * probed at driver initialization.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
#endif
7788
7789 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7790 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7791
#ifdef ETHTOOL_GEEPROM
/* ethtool get_eeprom handler: read an arbitrary byte range out of
 * NVRAM.  NVRAM is word-addressed, so an unaligned request is split
 * into a partial leading word, whole middle words, and a partial
 * trailing word.  eeprom->len is updated to reflect the bytes actually
 * copied, so a mid-transfer failure still reports partial progress.
 * Returns 0 on success or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are big-endian; present bytes in LE order. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
#endif
7855
7856 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7857
#ifdef ETHTOOL_SEEPROM
/* ethtool set_eeprom handler: write an arbitrary byte range to NVRAM.
 *
 * NVRAM is written in 4-byte words, so a request that starts or ends
 * off a word boundary is widened by read-modify-write: the partial
 * leading/trailing words are read first, merged with the caller's data
 * in a temporary buffer, and the whole aligned span is written back.
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Unaligned request: merge caller data with the
		 * preserved partial words in a temporary buffer.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)	/* fix: compare pointers with !, not == 0 */
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
#endif
7918
/* ethtool get_settings handler: report the supported/advertised link
 * modes, port type (TP vs. fiber) and, when the interface is up, the
 * currently negotiated speed and duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes, unless this is a 10/100-only board. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		/* Copper: full 10/100 MII feature set. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Active speed/duplex are only meaningful while the link is up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7953   
7954 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7955 {
7956         struct tg3 *tp = netdev_priv(dev);
7957   
7958         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7959                 /* These are the only valid advertisement bits allowed.  */
7960                 if (cmd->autoneg == AUTONEG_ENABLE &&
7961                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7962                                           ADVERTISED_1000baseT_Full |
7963                                           ADVERTISED_Autoneg |
7964                                           ADVERTISED_FIBRE)))
7965                         return -EINVAL;
7966                 /* Fiber can only do SPEED_1000.  */
7967                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7968                          (cmd->speed != SPEED_1000))
7969                         return -EINVAL;
7970         /* Copper cannot force SPEED_1000.  */
7971         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7972                    (cmd->speed == SPEED_1000))
7973                 return -EINVAL;
7974         else if ((cmd->speed == SPEED_1000) &&
7975                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7976                 return -EINVAL;
7977
7978         tg3_full_lock(tp, 0);
7979
7980         tp->link_config.autoneg = cmd->autoneg;
7981         if (cmd->autoneg == AUTONEG_ENABLE) {
7982                 tp->link_config.advertising = cmd->advertising;
7983                 tp->link_config.speed = SPEED_INVALID;
7984                 tp->link_config.duplex = DUPLEX_INVALID;
7985         } else {
7986                 tp->link_config.advertising = 0;
7987                 tp->link_config.speed = cmd->speed;
7988                 tp->link_config.duplex = cmd->duplex;
7989         }
7990   
7991         if (netif_running(dev))
7992                 tg3_setup_phy(tp, 1);
7993
7994         tg3_full_unlock(tp);
7995   
7996         return 0;
7997 }
7998   
7999 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8000 {
8001         struct tg3 *tp = netdev_priv(dev);
8002   
8003         strcpy(info->driver, DRV_MODULE_NAME);
8004         strcpy(info->version, DRV_MODULE_VERSION);
8005         strcpy(info->fw_version, tp->fw_ver);
8006         strcpy(info->bus_info, pci_name(tp->pdev));
8007 }
8008   
8009 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8010 {
8011         struct tg3 *tp = netdev_priv(dev);
8012   
8013         wol->supported = WAKE_MAGIC;
8014         wol->wolopts = 0;
8015         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8016                 wol->wolopts = WAKE_MAGIC;
8017         memset(&wol->sopass, 0, sizeof(wol->sopass));
8018 }
8019   
8020 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8021 {
8022         struct tg3 *tp = netdev_priv(dev);
8023   
8024         if (wol->wolopts & ~WAKE_MAGIC)
8025                 return -EINVAL;
8026         if ((wol->wolopts & WAKE_MAGIC) &&
8027             tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
8028             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
8029                 return -EINVAL;
8030   
8031         spin_lock_bh(&tp->lock);
8032         if (wol->wolopts & WAKE_MAGIC)
8033                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8034         else
8035                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8036         spin_unlock_bh(&tp->lock);
8037   
8038         return 0;
8039 }
8040   
8041 static u32 tg3_get_msglevel(struct net_device *dev)
8042 {
8043         struct tg3 *tp = netdev_priv(dev);
8044         return tp->msg_enable;
8045 }
8046   
8047 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8048 {
8049         struct tg3 *tp = netdev_priv(dev);
8050         tp->msg_enable = value;
8051 }
8052   
#if TG3_TSO_SUPPORT != 0
/* ethtool -K tso on|off.  Rejects enabling on chips without TSO support;
 * on HW_TSO_2 capable parts the setting is mirrored into NETIF_F_TSO6.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
8072   
/* ethtool -r: restart autonegotiation on the copper PHY.
 * Returns -EAGAIN if the interface is down, -EINVAL for serdes devices
 * or when autoneg is not enabled (and no parallel detect), else 0.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice; the first result is discarded
	 * and only the second is checked.  This looks deliberate (flushing
	 * a stale/latched PHY value) -- confirm before collapsing it into
	 * a single read.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick the PHY: re-enable autoneg and restart it. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8099   
8100 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8101 {
8102         struct tg3 *tp = netdev_priv(dev);
8103   
8104         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8105         ering->rx_mini_max_pending = 0;
8106         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8107                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8108         else
8109                 ering->rx_jumbo_max_pending = 0;
8110
8111         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8112
8113         ering->rx_pending = tp->rx_pending;
8114         ering->rx_mini_pending = 0;
8115         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8116                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8117         else
8118                 ering->rx_jumbo_pending = 0;
8119
8120         ering->tx_pending = tp->tx_pending;
8121 }
8122   
8123 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8124 {
8125         struct tg3 *tp = netdev_priv(dev);
8126         int irq_sync = 0, err = 0;
8127   
8128         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8129             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8130             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8131                 return -EINVAL;
8132   
8133         if (netif_running(dev)) {
8134                 tg3_netif_stop(tp);
8135                 irq_sync = 1;
8136         }
8137
8138         tg3_full_lock(tp, irq_sync);
8139   
8140         tp->rx_pending = ering->rx_pending;
8141
8142         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8143             tp->rx_pending > 63)
8144                 tp->rx_pending = 63;
8145         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8146         tp->tx_pending = ering->tx_pending;
8147
8148         if (netif_running(dev)) {
8149                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8150                 err = tg3_restart_hw(tp, 1);
8151                 if (!err)
8152                         tg3_netif_start(tp);
8153         }
8154
8155         tg3_full_unlock(tp);
8156   
8157         return err;
8158 }
8159   
8160 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8161 {
8162         struct tg3 *tp = netdev_priv(dev);
8163   
8164         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8165         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8166         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8167 }
8168   
8169 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8170 {
8171         struct tg3 *tp = netdev_priv(dev);
8172         int irq_sync = 0, err = 0;
8173   
8174         if (netif_running(dev)) {
8175                 tg3_netif_stop(tp);
8176                 irq_sync = 1;
8177         }
8178
8179         tg3_full_lock(tp, irq_sync);
8180
8181         if (epause->autoneg)
8182                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8183         else
8184                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8185         if (epause->rx_pause)
8186                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8187         else
8188                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8189         if (epause->tx_pause)
8190                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8191         else
8192                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8193
8194         if (netif_running(dev)) {
8195                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8196                 err = tg3_restart_hw(tp, 1);
8197                 if (!err)
8198                         tg3_netif_start(tp);
8199         }
8200
8201         tg3_full_unlock(tp);
8202   
8203         return err;
8204 }
8205   
8206 static u32 tg3_get_rx_csum(struct net_device *dev)
8207 {
8208         struct tg3 *tp = netdev_priv(dev);
8209         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8210 }
8211   
8212 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8213 {
8214         struct tg3 *tp = netdev_priv(dev);
8215   
8216         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8217                 if (data != 0)
8218                         return -EINVAL;
8219                 return 0;
8220         }
8221   
8222         spin_lock_bh(&tp->lock);
8223         if (data)
8224                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8225         else
8226                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8227         spin_unlock_bh(&tp->lock);
8228   
8229         return 0;
8230 }
8231   
#if (LINUX_VERSION_CODE >= 0x20418)
/* ethtool -K tx on|off: toggle TX checksum offload.
 * Chips with broken checksum hardware may only stay disabled.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	/* 5755/5787 use the HW-csum feature set; the backported helper is
	 * used on kernels that predate ethtool_op_set_tx_hw_csum (< 2.6.12).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
#if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
		tg3_set_tx_hw_csum(dev, data);
#else
		ethtool_op_set_tx_hw_csum(dev, data);
#endif
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
#endif
8256
/* Number of entries in the ethtool statistics string/value tables. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8261
/* Number of self-tests reported by ethtool. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8266
8267 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8268 {
8269         switch (stringset) {
8270         case ETH_SS_STATS:
8271                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8272                 break;
8273         case ETH_SS_TEST:
8274                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8275                 break;
8276         default:
8277                 WARN_ON(1);     /* we need a WARN() */
8278                 break;
8279         }
8280 }
8281
/* ethtool -p: blink the port LEDs for identification.
 * @data: number of seconds to blink; 0 means the 2-second default.
 * Alternates all-LEDs-on / all-LEDs-off every half second, then restores
 * the original LED control value.  Returns -EAGAIN if the device is down.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* Two half-second phases per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);
		/* Sleep 500ms; a signal aborts the blinking early.
		 * (Pre-2.6.9 kernels lack msleep_interruptible.)
		 */
#if (LINUX_VERSION_CODE < 0x20609)
		set_current_state(TASK_INTERRUPTIBLE);
		if (schedule_timeout(HZ / 2))
#else
		if (msleep_interruptible(500))
#endif
			break;
	}
	/* Restore the LED state the chip was using before the test. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8317
8318 static void tg3_get_ethtool_stats (struct net_device *dev,
8319                                    struct ethtool_stats *estats, u64 *tmp_stats)
8320 {
8321         struct tg3 *tp = netdev_priv(dev);
8322         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8323 }
8324
8325 #define NVRAM_TEST_SIZE 0x100
8326 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8327
/* Self-test: validate the NVRAM contents.
 *
 * Classifies the image from the magic word (standard EEPROM layout vs.
 * a selfboot image), copies the relevant region into a temporary buffer
 * and verifies its checksums.  Returns 0 on success, -EIO on read failure
 * or checksum mismatch, -ENOMEM if allocation fails.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		/* Selfboot image: only format-1 images are checksummed
		 * here; other selfboot variants pass trivially.
		 */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);	/* buffer holds LE words */
	}
	if (i < size)
		goto out;

	/* Selfboot format: the 8-bit sum over the whole image must be 0. */
	if ((cpu_to_be32(buf[0]) & 0xff000000) == 0xa5000000) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8393
8394 #define TG3_SERDES_TIMEOUT_SEC  2
8395 #define TG3_COPPER_TIMEOUT_SEC  7
8396
/* Self-test: wait for link-up, polling the carrier flag once per second.
 * Serdes devices get TG3_SERDES_TIMEOUT_SEC seconds; copper devices get
 * the longer TG3_COPPER_TIMEOUT_SEC.  Returns 0 once carrier is seen,
 * -ENODEV if the interface is down, -EIO on timeout or signal.
 */
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		/* Sleep ~1s; a pending signal aborts the wait early.
		 * (Pre-2.6.9 kernels lack msleep_interruptible.)
		 */
#if (LINUX_VERSION_CODE < 0x20609)
		set_current_state(TASK_INTERRUPTIBLE);
		if (schedule_timeout(HZ))
#else
		if (msleep_interruptible(1000))
#endif
			break;
	}

	return -EIO;
}
8424
8425 /* Only test the commonly used registers */
/* Self-test: exercise the commonly used registers.
 *
 * For each table entry (filtered by chip generation flags), the register
 * is saved, written with all-zeros and then all-ones, and the read-only
 * and read/write bit masks are checked after each write.  The original
 * value is restored afterwards.  Returns 0 on success, -EIO (with a log
 * of the failing offset) on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-applicability flags, below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits that are read-only */
		u32 write_mask;	/* bits that are read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = 0;
	is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip generation. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving to the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8644
8645 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8646 {
8647         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8648         int i;
8649         u32 j;
8650
8651         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8652                 for (j = 0; j < len; j += 4) {
8653                         u32 val;
8654
8655                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8656                         tg3_read_mem(tp, offset + j, &val);
8657                         if (val != test_pattern[i])
8658                                 return -EIO;
8659                 }
8660         }
8661         return 0;
8662 }
8663
8664 static int tg3_test_memory(struct tg3 *tp)
8665 {
8666         static struct mem_entry {
8667                 u32 offset;
8668                 u32 len;
8669         } mem_tbl_570x[] = {
8670                 { 0x00000000, 0x00b50},
8671                 { 0x00002000, 0x1c000},
8672                 { 0xffffffff, 0x00000}
8673         }, mem_tbl_5705[] = {
8674                 { 0x00000100, 0x0000c},
8675                 { 0x00000200, 0x00008},
8676                 { 0x00004000, 0x00800},
8677                 { 0x00006000, 0x01000},
8678                 { 0x00008000, 0x02000},
8679                 { 0x00010000, 0x0e000},
8680                 { 0xffffffff, 0x00000}
8681         }, mem_tbl_5755[] = {
8682                 { 0x00000200, 0x00008},
8683                 { 0x00004000, 0x00800},
8684                 { 0x00006000, 0x00800},
8685                 { 0x00008000, 0x02000},
8686                 { 0x00010000, 0x0c000},
8687                 { 0xffffffff, 0x00000}
8688         };
8689         struct mem_entry *mem_tbl;
8690         int err = 0;
8691         int i;
8692
8693         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8694                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8695                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8696                         mem_tbl = mem_tbl_5755;
8697                 else
8698                         mem_tbl = mem_tbl_5705;
8699         } else
8700                 mem_tbl = mem_tbl_570x;
8701
8702         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8703                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8704                     mem_tbl[i].len)) != 0)
8705                         break;
8706         }
8707         
8708         return err;
8709 }
8710
8711 #define TG3_MAC_LOOPBACK        0
8712 #define TG3_PHY_LOOPBACK        1
8713
8714 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8715 {
8716         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8717         u32 desc_idx;
8718         struct sk_buff *skb, *rx_skb;
8719         u8 *tx_data;
8720         dma_addr_t map;
8721         int num_pkts, tx_len, rx_len, i, err;
8722         struct tg3_rx_buffer_desc *desc;
8723
8724         if (loopback_mode == TG3_MAC_LOOPBACK) {
8725                 /* HW errata - mac loopback fails in some cases on 5780.
8726                  * Normal traffic and PHY loopback are not affected by
8727                  * errata.
8728                  */
8729                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8730                         return 0;
8731
8732                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8733                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8734                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8735                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8736                 else
8737                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8738                 tw32(MAC_MODE, mac_mode);
8739         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8740                 u32 val;
8741
8742                 val = BMCR_LOOPBACK | BMCR_FULLDPLX;
8743                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8744                         val |= BMCR_SPEED100;
8745                 else
8746                         val |= BMCR_SPEED1000;
8747
8748                 tg3_writephy(tp, MII_BMCR, val);
8749                 udelay(40);
8750
8751                 /* reset to prevent losing 1st rx packet intermittently */
8752                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8753                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8754                         udelay(10);
8755                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8756                 }
8757                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8758                            MAC_MODE_LINK_POLARITY;
8759                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8760                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8761                 else
8762                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8763                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8764                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8765                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8766                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8767                 }
8768                 tw32(MAC_MODE, mac_mode);
8769         }
8770         else
8771                 return -EINVAL;
8772
8773         err = -EIO;
8774
8775         tx_len = 1514;
8776         skb = netdev_alloc_skb(tp->dev, tx_len);
8777         if (!skb)
8778                 return -ENOMEM;
8779
8780         tx_data = skb_put(skb, tx_len);
8781         memcpy(tx_data, tp->dev->dev_addr, 6);
8782         memset(tx_data + 6, 0x0, 8);
8783
8784         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8785
8786         for (i = 14; i < tx_len; i++)
8787                 tx_data[i] = (u8) (i & 0xff);
8788
8789         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8790
8791         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8792              HOSTCC_MODE_NOW);
8793
8794         udelay(10);
8795
8796         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8797
8798         num_pkts = 0;
8799
8800         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8801
8802         tp->tx_prod++;
8803         num_pkts++;
8804
8805         /* Some platforms need to sync memory here */
8806         wmb();
8807
8808         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8809                      tp->tx_prod);
8810         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8811
8812         udelay(10);
8813
8814         for (i = 0; i < 50; i++) {
8815                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8816                        HOSTCC_MODE_NOW);
8817
8818                 udelay(10);
8819
8820                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8821                 rx_idx = tp->hw_status->idx[0].rx_producer;
8822                 if ((tx_idx == tp->tx_prod) &&
8823                     (rx_idx == (rx_start_idx + num_pkts)))
8824                         break;
8825         }
8826
8827         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8828         dev_kfree_skb(skb);
8829
8830         if (tx_idx != tp->tx_prod)
8831                 goto out;
8832
8833         if (rx_idx != rx_start_idx + num_pkts)
8834                 goto out;
8835
8836         desc = &tp->rx_rcb[rx_start_idx];
8837         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8838         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8839         if (opaque_key != RXD_OPAQUE_RING_STD)
8840                 goto out;
8841
8842         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8843             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8844                 goto out;
8845
8846         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8847         if (rx_len != tx_len)
8848                 goto out;
8849
8850         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8851
8852         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8853         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8854
8855         for (i = 14; i < tx_len; i++) {
8856                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8857                         goto out;
8858         }
8859         err = 0;
8860         
8861         /* tg3_free_rings will unmap and free the rx_skb */
8862 out:
8863         return err;
8864 }
8865
8866 #define TG3_MAC_LOOPBACK_FAILED         1
8867 #define TG3_PHY_LOOPBACK_FAILED         2
8868 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8869                                          TG3_PHY_LOOPBACK_FAILED)
8870
8871 static int tg3_test_loopback(struct tg3 *tp)
8872 {
8873         int err = 0;
8874
8875         if (!netif_running(tp->dev))
8876                 return TG3_LOOPBACK_FAILED;
8877
8878         err = tg3_reset_hw(tp, 1);
8879         if (err)
8880                 return TG3_LOOPBACK_FAILED;
8881
8882         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8883                 err |= TG3_MAC_LOOPBACK_FAILED;
8884         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8885                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8886                         err |= TG3_PHY_LOOPBACK_FAILED;
8887         }
8888
8889         return err;
8890 }
8891
/* ethtool self-test entry point.  The NVRAM and link tests always run;
 * when ETH_TEST_FL_OFFLINE is requested the register, memory, loopback
 * and interrupt tests run as well, which requires halting and later
 * reinitializing the chip.  Per-test results go to data[] (nonzero
 * means that test failed) and etest->flags gets ETH_TEST_FL_FAILED on
 * any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip if it was left in a low-power state. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* data[0]: NVRAM test */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* data[1]: link test */
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it, then stop the on-chip RX
		 * CPU (and the TX CPU on pre-5705 parts).  The NVRAM
		 * lock is held around the CPU halts so firmware NVRAM
		 * accesses cannot race with them.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		/* data[2]: register test */
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		/* data[3]: internal memory test */
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4]: loopback test result mask */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* data[5]: interrupt test — must run with the full
		 * lock dropped since it waits for an IRQ.
		 */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset the chip and, if the interface is up,
		 * reinitialize it and restart the queues.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Drop back to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8964
8965 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8966 {
8967 #if (LINUX_VERSION_CODE >= 0x020607)
8968         struct mii_ioctl_data *data = if_mii(ifr);
8969 #else
8970         struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
8971 #endif
8972         struct tg3 *tp = netdev_priv(dev);
8973         int err;
8974
8975         switch(cmd) {
8976         case SIOCGMIIPHY:
8977                 data->phy_id = PHY_ADDR;
8978
8979                 /* fallthru */
8980         case SIOCGMIIREG: {
8981                 u32 mii_regval;
8982
8983                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8984                         break;                  /* We have no PHY */
8985
8986                 if (tp->link_config.phy_is_low_power)
8987                         return -EAGAIN;
8988
8989                 spin_lock_bh(&tp->lock);
8990                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8991                 spin_unlock_bh(&tp->lock);
8992
8993                 data->val_out = mii_regval;
8994
8995                 return err;
8996         }
8997
8998         case SIOCSMIIREG:
8999                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9000                         break;                  /* We have no PHY */
9001
9002                 if (!capable(CAP_NET_ADMIN))
9003                         return -EPERM;
9004
9005                 if (tp->link_config.phy_is_low_power)
9006                         return -EAGAIN;
9007
9008                 spin_lock_bh(&tp->lock);
9009                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9010                 spin_unlock_bh(&tp->lock);
9011
9012                 return err;
9013
9014         default:
9015                 /* do nothing */
9016                 break;
9017         }
9018         return -EOPNOTSUPP;
9019 }
9020
9021 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * receive mode.  The interface is quiesced around the update so the
 * hardware never runs with a half-applied configuration.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
9041
/* VLAN acceleration hook: remove the device entry for @vid from our
 * vlan_group.  Quiesces the interface around the update, mirroring
 * tg3_vlan_rx_register() above.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
9057 #endif
9058
9059 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9060 {
9061         struct tg3 *tp = netdev_priv(dev);
9062
9063         memcpy(ec, &tp->coal, sizeof(*ec));
9064         return 0;
9065 }
9066
9067 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9068 {
9069         struct tg3 *tp = netdev_priv(dev);
9070         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9071         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9072
9073         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9074                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9075                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9076                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9077                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9078         }
9079
9080         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9081             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9082             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9083             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9084             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9085             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9086             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9087             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9088             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9089             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9090                 return -EINVAL;
9091
9092         /* No rx interrupts will be generated if both are zero */
9093         if ((ec->rx_coalesce_usecs == 0) &&
9094             (ec->rx_max_coalesced_frames == 0))
9095                 return -EINVAL;
9096
9097         /* No tx interrupts will be generated if both are zero */
9098         if ((ec->tx_coalesce_usecs == 0) &&
9099             (ec->tx_max_coalesced_frames == 0))
9100                 return -EINVAL;
9101
9102         /* Only copy relevant parameters, ignore all others. */
9103         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9104         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9105         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9106         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9107         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9108         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9109         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9110         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9111         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9112
9113         if (netif_running(dev)) {
9114                 tg3_full_lock(tp, 0);
9115                 __tg3_set_coalesce(tp, &tp->coal);
9116                 tg3_full_unlock(tp);
9117         }
9118         return 0;
9119 }
9120
/* ethtool operations table.  Optional entries are gated on the kernel
 * version / ethtool feature macros so this backported driver builds on
 * older kernels as well.
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
#if (LINUX_VERSION_CODE >= 0x20418)
	.get_eeprom_len		= tg3_get_eeprom_len,
#endif
#ifdef ETHTOOL_GEEPROM
	.get_eeprom		= tg3_get_eeprom,
#endif
#ifdef ETHTOOL_SEEPROM
	.set_eeprom		= tg3_set_eeprom,
#endif
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
#if (LINUX_VERSION_CODE >= 0x20418)
	.set_tx_csum		= tg3_set_tx_csum,
#endif
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
#ifdef ETHTOOL_GPERMADDR
	.get_perm_addr		= ethtool_op_get_perm_addr,
#endif
};
9170
9171 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9172 {
9173         u32 cursize, val, magic;
9174
9175         tp->nvram_size = EEPROM_CHIP_SIZE;
9176
9177         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9178                 return;
9179
9180         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9181                 return;
9182
9183         /*
9184          * Size the chip by reading offsets at increasing powers of two.
9185          * When we encounter our validation signature, we know the addressing
9186          * has wrapped around, and thus have our chip size.
9187          */
9188         cursize = 0x10;
9189
9190         while (cursize < tp->nvram_size) {
9191                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9192                         return;
9193
9194                 if (val == magic)
9195                         break;
9196
9197                 cursize <<= 1;
9198         }
9199
9200         tp->nvram_size = cursize;
9201 }
9202                 
9203 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9204 {
9205         u32 val;
9206
9207         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9208                 return;
9209
9210         /* Selfboot format */
9211         if (val != TG3_EEPROM_MAGIC) {
9212                 tg3_get_eeprom_size(tp);
9213                 return;
9214         }
9215
9216         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9217                 if (val != 0) {
9218                         tp->nvram_size = (val >> 16) * 1024;
9219                         return;
9220                 }
9221         }
9222         tp->nvram_size = 0x20000;
9223 }
9224
9225 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9226 {
9227         u32 nvcfg1;
9228
9229         nvcfg1 = tr32(NVRAM_CFG1);
9230         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9231                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9232         }
9233         else {
9234                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9235                 tw32(NVRAM_CFG1, nvcfg1);
9236         }
9237
9238         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9239             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9240                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9241                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9242                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9243                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9244                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9245                                 break;
9246                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9247                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9248                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9249                                 break;
9250                         case FLASH_VENDOR_ATMEL_EEPROM:
9251                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9252                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9253                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9254                                 break;
9255                         case FLASH_VENDOR_ST:
9256                                 tp->nvram_jedecnum = JEDEC_ST;
9257                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9258                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9259                                 break;
9260                         case FLASH_VENDOR_SAIFUN:
9261                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9262                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9263                                 break;
9264                         case FLASH_VENDOR_SST_SMALL:
9265                         case FLASH_VENDOR_SST_LARGE:
9266                                 tp->nvram_jedecnum = JEDEC_SST;
9267                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9268                                 break;
9269                 }
9270         }
9271         else {
9272                 tp->nvram_jedecnum = JEDEC_ATMEL;
9273                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9274                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9275         }
9276 }
9277
9278 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9279 {
9280         u32 nvcfg1;
9281
9282         nvcfg1 = tr32(NVRAM_CFG1);
9283
9284         /* NVRAM protection for TPM */
9285         if (nvcfg1 & (1 << 27))
9286                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9287
9288         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9289                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9290                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9291                         tp->nvram_jedecnum = JEDEC_ATMEL;
9292                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9293                         break;
9294                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9295                         tp->nvram_jedecnum = JEDEC_ATMEL;
9296                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9297                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9298                         break;
9299                 case FLASH_5752VENDOR_ST_M45PE10:
9300                 case FLASH_5752VENDOR_ST_M45PE20:
9301                 case FLASH_5752VENDOR_ST_M45PE40:
9302                         tp->nvram_jedecnum = JEDEC_ST;
9303                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9304                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9305                         break;
9306         }
9307
9308         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9309                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9310                         case FLASH_5752PAGE_SIZE_256:
9311                                 tp->nvram_pagesize = 256;
9312                                 break;
9313                         case FLASH_5752PAGE_SIZE_512:
9314                                 tp->nvram_pagesize = 512;
9315                                 break;
9316                         case FLASH_5752PAGE_SIZE_1K:
9317                                 tp->nvram_pagesize = 1024;
9318                                 break;
9319                         case FLASH_5752PAGE_SIZE_2K:
9320                                 tp->nvram_pagesize = 2048;
9321                                 break;
9322                         case FLASH_5752PAGE_SIZE_4K:
9323                                 tp->nvram_pagesize = 4096;
9324                                 break;
9325                         case FLASH_5752PAGE_SIZE_264:
9326                                 tp->nvram_pagesize = 264;
9327                                 break;
9328                 }
9329         }
9330         else {
9331                 /* For eeprom, set pagesize to maximum eeprom size */
9332                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9333
9334                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9335                 tw32(NVRAM_CFG1, nvcfg1);
9336         }
9337 }
9338
9339 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9340 {
9341         u32 nvcfg1;
9342
9343         nvcfg1 = tr32(NVRAM_CFG1);
9344
9345         /* NVRAM protection for TPM */
9346         if (nvcfg1 & (1 << 27))
9347                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9348
9349         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9350                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9351                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9352                         tp->nvram_jedecnum = JEDEC_ATMEL;
9353                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9354                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9355
9356                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9357                         tw32(NVRAM_CFG1, nvcfg1);
9358                         break;
9359                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9360                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9361                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9362                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9363                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9364                         tp->nvram_jedecnum = JEDEC_ATMEL;
9365                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9366                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9367                         tp->nvram_pagesize = 264;
9368                         break;
9369                 case FLASH_5752VENDOR_ST_M45PE10:
9370                 case FLASH_5752VENDOR_ST_M45PE20:
9371                 case FLASH_5752VENDOR_ST_M45PE40:
9372                         tp->nvram_jedecnum = JEDEC_ST;
9373                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9374                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9375                         tp->nvram_pagesize = 256;
9376                         break;
9377         }
9378 }
9379
9380 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9381 {
9382         u32 nvcfg1;
9383
9384         nvcfg1 = tr32(NVRAM_CFG1);
9385
9386         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9387                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9388                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9389                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9390                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9391                         tp->nvram_jedecnum = JEDEC_ATMEL;
9392                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9393                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9394
9395                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9396                         tw32(NVRAM_CFG1, nvcfg1);
9397                         break;
9398                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9399                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9400                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9401                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9402                         tp->nvram_jedecnum = JEDEC_ATMEL;
9403                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9404                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9405                         tp->nvram_pagesize = 264;
9406                         break;
9407                 case FLASH_5752VENDOR_ST_M45PE10:
9408                 case FLASH_5752VENDOR_ST_M45PE20:
9409                 case FLASH_5752VENDOR_ST_M45PE40:
9410                         tp->nvram_jedecnum = JEDEC_ST;
9411                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9412                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9413                         tp->nvram_pagesize = 256;
9414                         break;
9415         }
9416 }
9417
9418 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9419 static void __devinit tg3_nvram_init(struct tg3 *tp)
9420 {
9421         int j;
9422
9423         tw32_f(GRC_EEPROM_ADDR,
9424              (EEPROM_ADDR_FSM_RESET |
9425               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9426                EEPROM_ADDR_CLKPERD_SHIFT)));
9427
9428         /* XXX schedule_timeout() ... */
9429         for (j = 0; j < 100; j++)
9430                 udelay(10);
9431
9432         /* Enable seeprom accesses. */
9433         tw32_f(GRC_LOCAL_CTRL,
9434              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9435         udelay(100);
9436
9437         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9438             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9439                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9440
9441                 if (tg3_nvram_lock(tp)) {
9442                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9443                                "tg3_nvram_init failed.\n", tp->dev->name);
9444                         return;
9445                 }
9446                 tg3_enable_nvram_access(tp);
9447
9448                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9449                         tg3_get_5752_nvram_info(tp);
9450                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9451                         tg3_get_5755_nvram_info(tp);
9452                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9453                         tg3_get_5787_nvram_info(tp);
9454                 else
9455                         tg3_get_nvram_info(tp);
9456
9457                 tg3_get_nvram_size(tp);
9458
9459                 tg3_disable_nvram_access(tp);
9460                 tg3_nvram_unlock(tp);
9461
9462         } else {
9463                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9464
9465                 tg3_get_eeprom_size(tp);
9466         }
9467 }
9468
9469 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9470                                         u32 offset, u32 *val)
9471 {
9472         u32 tmp;
9473         int i;
9474
9475         if (offset > EEPROM_ADDR_ADDR_MASK ||
9476             (offset % 4) != 0)
9477                 return -EINVAL;
9478
9479         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9480                                         EEPROM_ADDR_DEVID_MASK |
9481                                         EEPROM_ADDR_READ);
9482         tw32(GRC_EEPROM_ADDR,
9483              tmp |
9484              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9485              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9486               EEPROM_ADDR_ADDR_MASK) |
9487              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9488
9489         for (i = 0; i < 10000; i++) {
9490                 tmp = tr32(GRC_EEPROM_ADDR);
9491
9492                 if (tmp & EEPROM_ADDR_COMPLETE)
9493                         break;
9494                 udelay(100);
9495         }
9496         if (!(tmp & EEPROM_ADDR_COMPLETE))
9497                 return -EBUSY;
9498
9499         *val = tr32(GRC_EEPROM_DATA);
9500         return 0;
9501 }
9502
9503 #define NVRAM_CMD_TIMEOUT 10000
9504
9505 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9506 {
9507         int i;
9508
9509         tw32(NVRAM_CMD, nvram_cmd);
9510         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9511                 udelay(10);
9512                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9513                         udelay(10);
9514                         break;
9515                 }
9516         }
9517         if (i == NVRAM_CMD_TIMEOUT) {
9518                 return -EBUSY;
9519         }
9520         return 0;
9521 }
9522
9523 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9524 {
9525         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9526             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9527             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9528             (tp->nvram_jedecnum == JEDEC_ATMEL))
9529
9530                 addr = ((addr / tp->nvram_pagesize) <<
9531                         ATMEL_AT45DB0X1B_PAGE_POS) +
9532                        (addr % tp->nvram_pagesize);
9533
9534         return addr;
9535 }
9536
9537 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9538 {
9539         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9540             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9541             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9542             (tp->nvram_jedecnum == JEDEC_ATMEL))
9543
9544                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9545                         tp->nvram_pagesize) +
9546                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9547
9548         return addr;
9549 }
9550
9551 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9552 {
9553         int ret;
9554
9555         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9556                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9557
9558         offset = tg3_nvram_phys_addr(tp, offset);
9559
9560         if (offset > NVRAM_ADDR_MSK)
9561                 return -EINVAL;
9562
9563         ret = tg3_nvram_lock(tp);
9564         if (ret)
9565                 return ret;
9566
9567         tg3_enable_nvram_access(tp);
9568
9569         tw32(NVRAM_ADDR, offset);
9570         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9571                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9572
9573         if (ret == 0)
9574                 *val = swab32(tr32(NVRAM_RDDATA));
9575
9576         tg3_disable_nvram_access(tp);
9577
9578         tg3_nvram_unlock(tp);
9579
9580         return ret;
9581 }
9582
9583 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9584 {
9585         int err;
9586         u32 tmp;
9587
9588         err = tg3_nvram_read(tp, offset, &tmp);
9589         *val = swab32(tmp);
9590         return err;
9591 }
9592
/* Write @len bytes from @buf to the serial EEPROM starting at byte
 * @offset, one 32-bit word at a time.  Used when the device has no
 * flash-style NVRAM interface.  @offset and @len must be dword aligned.
 *
 * Returns 0 on success, or -EBUSY if the controller never signalled
 * completion for a word within the poll window (10000 * 100us = 1s).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* EEPROM data register expects little-endian layout. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Kick off the write cycle for this word. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for the hardware to signal completion. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9634
/* offset and length are dword aligned */
/* Write to unbuffered flash: for each page touched, read the whole page
 * into a temp buffer, overlay the new data, erase the page, then write
 * the merged page back word by word.  Finishes with a write-disable.
 *
 * NOTE(review): @buf is never advanced inside the while loop, so a write
 * spanning more than one flash page appears to re-copy from the start of
 * @buf for every page — confirm against the upstream driver before
 * relying on multi-page writes through this path.
 *
 * Returns 0 on success or a negative errno from the read/exec helpers.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Base address of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the entire existing page so unmodified bytes
		 * survive the erase.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Overlay the caller's data onto the page image. */
		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back, flagging the first and
		 * last words of the burst for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always re-disable writes, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9730
/* offset and length are dword aligned */
/* Write to buffered flash (or EEPROM-like parts): the controller's
 * internal page buffer absorbs the data, so no explicit page erase is
 * needed.  Words are streamed one at a time with FIRST/LAST markers at
 * page and transfer boundaries.
 *
 * Returns 0 on success or a negative errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		/* Data register wants big-endian byte order. */
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark the start of a page or of the whole transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts (except on 5752/5755/5787) need an explicit
		 * write-enable before each page begins.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9781
9782 /* offset and length are dword aligned */
9783 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9784 {
9785         int ret;
9786
9787         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9788                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9789                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9790                 udelay(40);
9791         }
9792
9793         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9794                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9795         }
9796         else {
9797                 u32 grc_mode;
9798
9799                 ret = tg3_nvram_lock(tp);
9800                 if (ret)
9801                         return ret;
9802
9803                 tg3_enable_nvram_access(tp);
9804                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9805                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9806                         tw32(NVRAM_WRITE1, 0x406);
9807
9808                 grc_mode = tr32(GRC_MODE);
9809                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9810
9811                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9812                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9813
9814                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9815                                 buf);
9816                 }
9817                 else {
9818                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9819                                 buf);
9820                 }
9821
9822                 grc_mode = tr32(GRC_MODE);
9823                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9824
9825                 tg3_disable_nvram_access(tp);
9826                 tg3_nvram_unlock(tp);
9827         }
9828
9829         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9830                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9831                 udelay(40);
9832         }
9833
9834         return ret;
9835 }
9836
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID expected on
 * that board.  phy_id == 0 marks boards handled as serdes in
 * tg3_phy_probe().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9841
/* Hardcoded board table used as a last resort when neither the chip's
 * PHY ID registers nor the EEPROM yield a usable PHY ID
 * (see lookup_by_subsys() / tg3_phy_probe()).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9879
9880 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9881 {
9882         int i;
9883
9884         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9885                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9886                      tp->pdev->subsystem_vendor) &&
9887                     (subsys_id_to_phy_id[i].subsys_devid ==
9888                      tp->pdev->subsystem_device))
9889                         return &subsys_id_to_phy_id[i];
9890         }
9891         return NULL;
9892 }
9893
/* Pull hardware configuration left in NIC SRAM by the bootcode/EEPROM:
 * PHY ID, LED mode, serdes vs. copper, write-protect, ASF and WoL flags.
 * Falls back to safe defaults when the SRAM signature is absent.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
#if (LINUX_VERSION_CODE < 0x20607)
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ / 1000);
#else
	msleep(1);
#endif

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults used when no valid SRAM signature is found. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane bootcode
		 * version; reading it elsewhere would yield garbage.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the SRAM's split encoding. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards wire their LEDs for PHY_2 mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
10050
/* Identify the PHY attached to this device and set up its initial
 * advertisement.  PHY ID sources, in order of preference: the MII ID
 * registers (skipped under ASF), the value found in EEPROM by
 * tg3_get_eeprom_hw_cfg(), and finally the hardcoded subsystem-ID
 * table.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* Zero phy_id in the table means a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHYs without ASF: reset the PHY and advertise all
	 * supported speeds, unless a link is already established.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 need to be forced link master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this re-runs the 5401 DSP init already done just
	 * above — looks redundant; confirm against upstream before
	 * removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10173
10174 static void __devinit tg3_read_partno(struct tg3 *tp)
10175 {
10176         unsigned char vpd_data[256];
10177         int i;
10178         u32 magic;
10179
10180         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10181                 goto out_not_found;
10182
10183         if (magic == TG3_EEPROM_MAGIC) {
10184                 for (i = 0; i < 256; i += 4) {
10185                         u32 tmp;
10186
10187                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10188                                 goto out_not_found;
10189
10190                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10191                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10192                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10193                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10194                 }
10195         } else {
10196                 int vpd_cap;
10197
10198                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10199                 for (i = 0; i < 256; i += 4) {
10200                         u32 tmp, j = 0;
10201                         u16 tmp16;
10202
10203                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10204                                               i);
10205                         while (j++ < 100) {
10206                                 pci_read_config_word(tp->pdev, vpd_cap +
10207                                                      PCI_VPD_ADDR, &tmp16);
10208                                 if (tmp16 & 0x8000)
10209                                         break;
10210 #if (LINUX_VERSION_CODE < 0x20607)
10211                                 set_current_state(TASK_UNINTERRUPTIBLE);
10212                                 schedule_timeout(1);
10213 #else
10214                                 msleep(1);
10215 #endif
10216                         }
10217                         if (!(tmp16 & 0x8000))
10218                                 goto out_not_found;
10219
10220                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10221                                               &tmp);
10222                         tmp = cpu_to_le32(tmp);
10223                         memcpy(&vpd_data[i], &tmp, 4);
10224                 }
10225         }
10226
10227         /* Now parse and find the part number. */
10228         for (i = 0; i < 256; ) {
10229                 unsigned char val = vpd_data[i];
10230                 int block_end;
10231
10232                 if (val == 0x82 || val == 0x91) {
10233                         i = (i + 3 +
10234                              (vpd_data[i + 1] +
10235                               (vpd_data[i + 2] << 8)));
10236                         continue;
10237                 }
10238
10239                 if (val != 0x90)
10240                         goto out_not_found;
10241
10242                 block_end = (i + 3 +
10243                              (vpd_data[i + 1] +
10244                               (vpd_data[i + 2] << 8)));
10245                 i += 3;
10246                 while (i < block_end) {
10247                         if (vpd_data[i + 0] == 'P' &&
10248                             vpd_data[i + 1] == 'N') {
10249                                 int partno_len = vpd_data[i + 2];
10250
10251                                 if (partno_len > 24)
10252                                         goto out_not_found;
10253
10254                                 memcpy(tp->board_part_number,
10255                                        &vpd_data[i + 3],
10256                                        partno_len);
10257
10258                                 /* Success. */
10259                                 return;
10260                         }
10261                 }
10262
10263                 /* Part number not found. */
10264                 goto out_not_found;
10265         }
10266
10267 out_not_found:
10268         strcpy(tp->board_part_number, "none");
10269 }
10270
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver (up to 16 bytes).  Silently returns if the NVRAM lacks
 * the expected magic/signature or any read fails.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the directory offset, word 0x4 the load start. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* 0x0c in the top bits marks a bootcode image header. */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy the 16-byte version string a word at a time. */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10310
10311 static int __devinit tg3_get_invariants(struct tg3 *tp)
10312 {
10313 #if (LINUX_VERSION_CODE >= 0x2060a)
10314         static struct pci_device_id write_reorder_chipsets[] = {
10315                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10316                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10317                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10318                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10319                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10320                              PCI_DEVICE_ID_VIA_8385_0) },
10321                 { },
10322         };
10323 #endif
10324         u32 misc_ctrl_reg;
10325         u32 cacheline_sz_reg;
10326         u32 pci_state_reg, grc_misc_cfg;
10327         u32 val;
10328         u16 pci_cmd;
10329         int err;
10330
10331         /* Force memory write invalidate off.  If we leave it on,
10332          * then on 5700_BX chips we have to enable a workaround.
10333          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10334          * to match the cacheline size.  The Broadcom driver have this
10335          * workaround but turns MWI off all the times so never uses
10336          * it.  This seems to suggest that the workaround is insufficient.
10337          */
10338         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10339         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10340         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10341
10342         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10343          * has the register indirect write enable bit set before
10344          * we try to access any of the MMIO registers.  It is also
10345          * critical that the PCI-X hw workaround situation is decided
10346          * before that as well.
10347          */
10348         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10349                               &misc_ctrl_reg);
10350
10351         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10352                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10353
10354         /* Wrong chip ID in 5752 A0. This code can be removed later
10355          * as A0 is not in production.
10356          */
10357         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10358                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10359
10360         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10361          * we need to disable memory and use config. cycles
10362          * only to access all registers. The 5702/03 chips
10363          * can mistakenly decode the special cycles from the
10364          * ICH chipsets as memory write cycles, causing corruption
10365          * of register and memory space. Only certain ICH bridges
10366          * will drive special cycles with non-zero data during the
10367          * address phase which can fall within the 5703's address
10368          * range. This is not an ICH bug as the PCI spec allows
10369          * non-zero address during special cycles. However, only
10370          * these ICH bridges are known to drive non-zero addresses
10371          * during special cycles.
10372          *
10373          * Since special cycles do not cross PCI bridges, we only
10374          * enable this workaround if the 5703 is on the secondary
10375          * bus of these ICH bridges.
10376          */
10377         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10378             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10379                 static struct tg3_dev_id {
10380                         u32     vendor;
10381                         u32     device;
10382                         u32     rev;
10383                 } ich_chipsets[] = {
10384                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10385                           PCI_ANY_ID },
10386                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10387                           PCI_ANY_ID },
10388                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10389                           0xa },
10390                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10391                           PCI_ANY_ID },
10392                         { },
10393                 };
10394                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10395                 struct pci_dev *bridge = NULL;
10396
10397                 while (pci_id->vendor != 0) {
10398                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10399                                                 bridge);
10400                         if (!bridge) {
10401                                 pci_id++;
10402                                 continue;
10403                         }
10404                         if (pci_id->rev != PCI_ANY_ID) {
10405                                 u8 rev;
10406
10407                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10408                                                      &rev);
10409                                 if (rev > pci_id->rev)
10410                                         continue;
10411                         }
10412                         if (bridge->subordinate &&
10413                             (bridge->subordinate->number ==
10414                              tp->pdev->bus->number)) {
10415
10416                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10417                                 pci_dev_put(bridge);
10418                                 break;
10419                         }
10420                 }
10421         }
10422
10423         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10424          * DMA addresses > 40-bit. This bridge may have other additional
10425          * 57xx devices behind it in some 4-port NIC designs for example.
10426          * Any tg3 device found behind the bridge will also need the 40-bit
10427          * DMA workaround.
10428          */
10429         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10430             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10431                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10432                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10433                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10434         }
10435         else {
10436                 struct pci_dev *bridge = NULL;
10437
10438                 do {
10439                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10440                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10441                                                 bridge);
10442                         if (bridge && bridge->subordinate &&
10443                             (bridge->subordinate->number <=
10444                              tp->pdev->bus->number) &&
10445                             (bridge->subordinate->subordinate >=
10446                              tp->pdev->bus->number)) {
10447                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10448                                 pci_dev_put(bridge);
10449                                 break;
10450                         }
10451                 } while (bridge);
10452         }
10453
10454         /* Initialize misc host control in PCI block. */
10455         tp->misc_host_ctrl |= (misc_ctrl_reg &
10456                                MISC_HOST_CTRL_CHIPREV);
10457         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10458                                tp->misc_host_ctrl);
10459
10460         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10461                               &cacheline_sz_reg);
10462
10463         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10464         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10465         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10466         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10467
10468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10469             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10470             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10472             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10473                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10474
10475         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10476             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10477                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10478
10479         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10480                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10481                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10482                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10483                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10484                 } else {
10485                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10486                                           TG3_FLG2_HW_TSO_1_BUG;
10487                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10488                                 ASIC_REV_5750 &&
10489                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10490                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10491                 }
10492         }
10493
10494         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10495             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10496             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10497             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10498             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10499                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10500
10501         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10502                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10503
10504         /* If we have an AMD 762 or VIA K8T800 chipset, write
10505          * reordering to the mailbox registers done by the host
10506          * controller can cause major troubles.  We read back from
10507          * every mailbox register write to force the writes to be
10508          * posted to the chip in order.
10509          */
10510 #if (LINUX_VERSION_CODE < 0x2060a)
10511         if ((pci_find_device(PCI_VENDOR_ID_AMD,
10512                              PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
10513              pci_find_device(PCI_VENDOR_ID_AMD,
10514                              PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
10515              pci_find_device(PCI_VENDOR_ID_VIA,
10516                              PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
10517 #else
10518         if (pci_dev_present(write_reorder_chipsets) &&
10519 #endif
10520             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10521                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10522
10523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10524             tp->pci_lat_timer < 64) {
10525                 tp->pci_lat_timer = 64;
10526
10527                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10528                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10529                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10530                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10531
10532                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10533                                        cacheline_sz_reg);
10534         }
10535
10536         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10537                               &pci_state_reg);
10538
10539         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10540                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10541
10542                 /* If this is a 5700 BX chipset, and we are in PCI-X
10543                  * mode, enable register write workaround.
10544                  *
10545                  * The workaround is to use indirect register accesses
10546                  * for all chip writes not to mailbox registers.
10547                  */
10548                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10549                         u32 pm_reg;
10550                         u16 pci_cmd;
10551
10552                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10553
10554                         /* The chip can have it's power management PCI config
10555                          * space registers clobbered due to this bug.
10556                          * So explicitly force the chip into D0 here.
10557                          */
10558                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10559                                               &pm_reg);
10560                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10561                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10562                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10563                                                pm_reg);
10564
10565                         /* Also, force SERR#/PERR# in PCI command. */
10566                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10567                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10568                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10569                 }
10570         }
10571
10572         /* 5700 BX chips need to have their TX producer index mailboxes
10573          * written twice to workaround a bug.
10574          */
10575         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10576                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10577
10578         /* Back to back register writes can cause problems on this chip,
10579          * the workaround is to read back all reg writes except those to
10580          * mailbox regs.  See tg3_write_indirect_reg32().
10581          *
10582          * PCI Express 5750_A0 rev chips need this workaround too.
10583          */
10584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10585             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10586              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10587                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10588
10589         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10590                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10591         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10592                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10593
10594         /* Chip-specific fixup from Broadcom driver */
10595         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10596             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10597                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10598                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10599         }
10600
10601         /* Default fast path register access methods */
10602         tp->read32 = tg3_read32;
10603         tp->write32 = tg3_write32;
10604         tp->read32_mbox = tg3_read32;
10605         tp->write32_mbox = tg3_write32;
10606         tp->write32_tx_mbox = tg3_write32;
10607         tp->write32_rx_mbox = tg3_write32;
10608
10609         /* Various workaround register access methods */
10610         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10611                 tp->write32 = tg3_write_indirect_reg32;
10612         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10613                 tp->write32 = tg3_write_flush_reg32;
10614
10615         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10616             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10617                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10618                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10619                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10620         }
10621
10622         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10623                 tp->read32 = tg3_read_indirect_reg32;
10624                 tp->write32 = tg3_write_indirect_reg32;
10625                 tp->read32_mbox = tg3_read_indirect_mbox;
10626                 tp->write32_mbox = tg3_write_indirect_mbox;
10627                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10628                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10629
10630                 iounmap(tp->regs);
10631                 tp->regs = NULL;
10632
10633                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10634                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10635                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10636         }
10637
10638         if (tp->write32 == tg3_write_indirect_reg32 ||
10639             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10640              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10641               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10642                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10643
10644         /* Get eeprom hw config before calling tg3_set_power_state().
10645          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10646          * determined before calling tg3_set_power_state() so that
10647          * we know whether or not to switch out of Vaux power.
10648          * When the flag is set, it means that GPIO1 is used for eeprom
10649          * write protect and also implies that it is a LOM where GPIOs
10650          * are not used to switch power.
10651          */ 
10652         tg3_get_eeprom_hw_cfg(tp);
10653
10654         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10655          * GPIO1 driven high will bring 5700's external PHY out of reset.
10656          * It is also used as eeprom write protect on LOMs.
10657          */
10658         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10659         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10660             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10661                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10662                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10663         /* Unused GPIO3 must be driven as output on 5752 because there
10664          * are no pull-up resistors on unused GPIO pins.
10665          */
10666         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10667                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10668
10669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10670                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10671
10672         /* Force the chip into D0. */
10673         err = tg3_set_power_state(tp, PCI_D0);
10674         if (err) {
10675                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10676                        pci_name(tp->pdev));
10677                 return err;
10678         }
10679
10680         /* 5700 B0 chips do not support checksumming correctly due
10681          * to hardware bugs.
10682          */
10683         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10684                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10685
10686         /* Derive initial jumbo mode from MTU assigned in
10687          * ether_setup() via the alloc_etherdev() call
10688          */
10689         if (tp->dev->mtu > ETH_DATA_LEN &&
10690             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10691                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10692
10693         /* Determine WakeOnLan speed to use. */
10694         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10695             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10696             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10697             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10698                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10699         } else {
10700                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10701         }
10702
10703         /* A few boards don't want Ethernet@WireSpeed phy feature */
10704         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10705             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10706              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10707              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10708             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10709                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10710
10711         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10712             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10713                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10714         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10715                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10716
10717         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10719                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10720                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10721                 else
10722                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10723         }
10724
10725         tp->coalesce_mode = 0;
10726         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10727             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10728                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10729
10730         /* Initialize MAC MI mode, polling disabled. */
10731         tw32_f(MAC_MI_MODE, tp->mi_mode);
10732         udelay(80);
10733
10734         /* Initialize data/descriptor byte/word swapping. */
10735         val = tr32(GRC_MODE);
10736         val &= GRC_MODE_HOST_STACKUP;
10737         tw32(GRC_MODE, val | tp->grc_mode);
10738
10739         tg3_switch_clocks(tp);
10740
10741         /* Clear this out for sanity. */
10742         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10743
10744         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10745                               &pci_state_reg);
10746         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10747             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10748                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10749
10750                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10751                     chiprevid == CHIPREV_ID_5701_B0 ||
10752                     chiprevid == CHIPREV_ID_5701_B2 ||
10753                     chiprevid == CHIPREV_ID_5701_B5) {
10754                         void __iomem *sram_base;
10755
10756                         /* Write some dummy words into the SRAM status block
10757                          * area, see if it reads back correctly.  If the return
10758                          * value is bad, force enable the PCIX workaround.
10759                          */
10760                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10761
10762                         writel(0x00000000, sram_base);
10763                         writel(0x00000000, sram_base + 4);
10764                         writel(0xffffffff, sram_base + 4);
10765                         if (readl(sram_base) != 0x00000000)
10766                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10767                 }
10768         }
10769
10770         udelay(50);
10771         tg3_nvram_init(tp);
10772
10773         grc_misc_cfg = tr32(GRC_MISC_CFG);
10774         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10775
10776         /* Broadcom's driver says that CIOBE multisplit has a bug */
10777 #if 0
10778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10779             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10780                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10781                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10782         }
10783 #endif
10784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10785             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10786              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10787                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10788
10789         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10790             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10791                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10792         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10793                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10794                                       HOSTCC_MODE_CLRTICK_TXBD);
10795
10796                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10797                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10798                                        tp->misc_host_ctrl);
10799         }
10800
10801         /* these are limited to 10/100 only */
10802         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10803              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10804             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10805              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10806              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10807               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10808               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10809             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10810              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10811               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10812                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10813
10814         err = tg3_phy_probe(tp);
10815         if (err) {
10816                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10817                        pci_name(tp->pdev), err);
10818                 /* ... but do not return immediately ... */
10819         }
10820
10821         tg3_read_partno(tp);
10822         tg3_read_fw_ver(tp);
10823
10824         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10825                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10826         } else {
10827                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10828                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10829                 else
10830                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10831         }
10832
10833         /* 5700 {AX,BX} chips have a broken status block link
10834          * change bit implementation, so we must use the
10835          * status register in those cases.
10836          */
10837         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10838                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10839         else
10840                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10841
10842         /* The led_ctrl is set during tg3_phy_probe, here we might
10843          * have to force the link status polling mechanism based
10844          * upon subsystem IDs.
10845          */
10846         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10847             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10848                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10849                                   TG3_FLAG_USE_LINKCHG_REG);
10850         }
10851
10852         /* For all SERDES we poll the MAC status register. */
10853         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10854                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10855         else
10856                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10857
10858         /* All chips before 5787 can get confused if TX buffers
10859          * straddle the 4GB address boundary in some cases.
10860          */
10861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10862             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10863                 tp->dev->hard_start_xmit = tg3_start_xmit;
10864         else
10865                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10866
10867         tp->rx_offset = 2;
10868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10869             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10870                 tp->rx_offset = 0;
10871
10872         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10873
10874         /* Increment the rx prod index on the rx std ring by at most
10875          * 8 for these chips to workaround hw errata.
10876          */
10877         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10880                 tp->rx_std_max_post = 8;
10881
10882         /* By default, disable wake-on-lan.  User can change this
10883          * using ETHTOOL_SWOL.
10884          */
10885         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10886
10887         return err;
10888 }
10889
10890 #ifdef CONFIG_SPARC64
10891 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10892 {
10893         struct net_device *dev = tp->dev;
10894         struct pci_dev *pdev = tp->pdev;
10895         struct pcidev_cookie *pcp = pdev->sysdata;
10896
10897         if (pcp != NULL) {
10898                 unsigned char *addr;
10899                 int len;
10900
10901                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10902                                         &len);
10903                 if (addr && len == 6) {
10904                         memcpy(dev->dev_addr, addr, 6);
10905                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10906                         return 0;
10907                 }
10908         }
10909         return -ENODEV;
10910 }
10911
10912 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10913 {
10914         struct net_device *dev = tp->dev;
10915
10916         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10917         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10918         return 0;
10919 }
10920 #endif
10921
/* Determine the device's MAC address, trying sources in decreasing
 * order of preference: OpenFirmware property (sparc64 only), the
 * bootcode-filled SRAM mailbox, NVRAM, and finally the live MAC
 * address registers.  On sparc64 the system IDPROM is a last resort.
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC64
	/* A firmware-provided address wins outright. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* NVRAM offset of the MAC address.  On dual-MAC capable chips
	 * (5704 and the 5780 class) the second function's address
	 * lives at 0xcc instead of 0x7c.
	 */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken,
		 * reset the NVRAM command engine; otherwise release
		 * the lock again immediately.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b == ASCII "HK" marker */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte significance within
		 * each word differs from the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		/* Last resort on sparc64: the system IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
#ifdef ETHTOOL_GPERMADDR
	/* Record the address found above as the permanent address. */
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
#endif
	return 0;
}
10996
/* Goal selectors for tg3_calc_dma_bndry(): terminate DMA bursts at
 * every cache line, or only at multi-cache-line boundaries.
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
10999
11000 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11001 {
11002         int cacheline_size;
11003         u8 byte;
11004         int goal;
11005
11006         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11007         if (byte == 0)
11008                 cacheline_size = 1024;
11009         else
11010                 cacheline_size = (int) byte * 4;
11011
11012         /* On 5703 and later chips, the boundary bits have no
11013          * effect.
11014          */
11015         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11016             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11017             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11018                 goto out;
11019
11020 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11021         goal = BOUNDARY_MULTI_CACHELINE;
11022 #else
11023 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11024         goal = BOUNDARY_SINGLE_CACHELINE;
11025 #else
11026         goal = 0;
11027 #endif
11028 #endif
11029
11030         if (!goal)
11031                 goto out;
11032
11033         /* PCI controllers on most RISC systems tend to disconnect
11034          * when a device tries to burst across a cache-line boundary.
11035          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11036          *
11037          * Unfortunately, for PCI-E there are only limited
11038          * write-side controls for this, and thus for reads
11039          * we will still get the disconnects.  We'll also waste
11040          * these PCI cycles for both read and write for chips
11041          * other than 5700 and 5701 which do not implement the
11042          * boundary bits.
11043          */
11044         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11045             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11046                 switch (cacheline_size) {
11047                 case 16:
11048                 case 32:
11049                 case 64:
11050                 case 128:
11051                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11052                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11053                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11054                         } else {
11055                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11056                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11057                         }
11058                         break;
11059
11060                 case 256:
11061                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11062                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11063                         break;
11064
11065                 default:
11066                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11067                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11068                         break;
11069                 };
11070         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11071                 switch (cacheline_size) {
11072                 case 16:
11073                 case 32:
11074                 case 64:
11075                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11076                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11077                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11078                                 break;
11079                         }
11080                         /* fallthrough */
11081                 case 128:
11082                 default:
11083                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11084                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11085                         break;
11086                 };
11087         } else {
11088                 switch (cacheline_size) {
11089                 case 16:
11090                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11091                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11092                                         DMA_RWCTRL_WRITE_BNDRY_16);
11093                                 break;
11094                         }
11095                         /* fallthrough */
11096                 case 32:
11097                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11098                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11099                                         DMA_RWCTRL_WRITE_BNDRY_32);
11100                                 break;
11101                         }
11102                         /* fallthrough */
11103                 case 64:
11104                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11105                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11106                                         DMA_RWCTRL_WRITE_BNDRY_64);
11107                                 break;
11108                         }
11109                         /* fallthrough */
11110                 case 128:
11111                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11112                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11113                                         DMA_RWCTRL_WRITE_BNDRY_128);
11114                                 break;
11115                         }
11116                         /* fallthrough */
11117                 case 256:
11118                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11119                                 DMA_RWCTRL_WRITE_BNDRY_256);
11120                         break;
11121                 case 512:
11122                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11123                                 DMA_RWCTRL_WRITE_BNDRY_512);
11124                         break;
11125                 case 1024:
11126                 default:
11127                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11128                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11129                         break;
11130                 };
11131         }
11132
11133 out:
11134         return val;
11135 }
11136
/* Run a single DMA transfer between a host buffer and NIC SRAM as a
 * probe-time sanity test.
 *
 * @buf/@buf_dma: host scratch buffer (virtual / bus address), @size
 *	bytes long.
 * @to_device: nonzero kicks the read-DMA engine (chip reads host
 *	memory), zero kicks the write-DMA engine (chip writes host
 *	memory).
 *
 * A single internal buffer descriptor is written into the NIC SRAM
 * descriptor pool through the indirect PCI memory window, the
 * corresponding DMA flow-through queue is enqueued, and the matching
 * completion FIFO is polled.
 *
 * Returns 0 when the completion FIFO reports our descriptor,
 * -ENODEV if the DMA does not complete within ~4ms.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA status before starting. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor points at the host buffer; 0x2100 is the NIC-local
	 * mbuf address used for this test (see the #if 0 read-back check
	 * in tg3_test_dma()).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor's SRAM address to start the DMA. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll for completion: up to 40 * 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11217
/* Size in bytes of the scratch buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE        0x2000
11219
/* Determine the value of tp->dma_rwctrl (the DMA read/write control
 * register) for this chip/bus combination and program it.
 *
 * On 5700/5701 additionally run a real DMA write+read loop-back test
 * against a scratch buffer with the maximum write burst size, to
 * expose the 5700/5701 write-DMA bug; if corruption is detected, the
 * write boundary is reduced to 16 bytes and the test is retried.
 * A few known-problematic host bridges get the 16-byte boundary even
 * when the test passes.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or -ENODEV if DMA fails even with the workaround.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Seed with the default PCI read/write command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-specific watermark / workaround bits. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary bits repurposed there). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual DMA loop-back test. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Fill the buffer with a known pattern, DMA it to the chip,
	 * DMA it back, and verify; retry once with a 16-byte write
	 * boundary on corruption.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte write
				 * boundary workaround and re-run the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
#if (LINUX_VERSION_CODE >= 0x2060a)
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};
#endif

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
#if (LINUX_VERSION_CODE < 0x2060a)
		if (pci_find_device(PCI_VENDOR_ID_APPLE,
			PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
#else
		if (pci_dev_present(dma_wait_state_chipsets))
#endif
		{
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11408
11409 static void __devinit tg3_init_link_config(struct tg3 *tp)
11410 {
11411         tp->link_config.advertising =
11412                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11413                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11414                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11415                  ADVERTISED_Autoneg | ADVERTISED_MII);
11416         tp->link_config.speed = SPEED_INVALID;
11417         tp->link_config.duplex = DUPLEX_INVALID;
11418         tp->link_config.autoneg = AUTONEG_ENABLE;
11419         tp->link_config.active_speed = SPEED_INVALID;
11420         tp->link_config.active_duplex = DUPLEX_INVALID;
11421         tp->link_config.phy_is_low_power = 0;
11422         tp->link_config.orig_speed = SPEED_INVALID;
11423         tp->link_config.orig_duplex = DUPLEX_INVALID;
11424         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11425 }
11426
11427 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11428 {
11429         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11430                 tp->bufmgr_config.mbuf_read_dma_low_water =
11431                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11432                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11433                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11434                 tp->bufmgr_config.mbuf_high_water =
11435                         DEFAULT_MB_HIGH_WATER_5705;
11436
11437                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11438                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11439                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11440                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11441                 tp->bufmgr_config.mbuf_high_water_jumbo =
11442                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11443         } else {
11444                 tp->bufmgr_config.mbuf_read_dma_low_water =
11445                         DEFAULT_MB_RDMA_LOW_WATER;
11446                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11447                         DEFAULT_MB_MACRX_LOW_WATER;
11448                 tp->bufmgr_config.mbuf_high_water =
11449                         DEFAULT_MB_HIGH_WATER;
11450
11451                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11452                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11453                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11454                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11455                 tp->bufmgr_config.mbuf_high_water_jumbo =
11456                         DEFAULT_MB_HIGH_WATER_JUMBO;
11457         }
11458
11459         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11460         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11461 }
11462
11463 static char * __devinit tg3_phy_string(struct tg3 *tp)
11464 {
11465         switch (tp->phy_id & PHY_ID_MASK) {
11466         case PHY_ID_BCM5400:    return "5400";
11467         case PHY_ID_BCM5401:    return "5401";
11468         case PHY_ID_BCM5411:    return "5411";
11469         case PHY_ID_BCM5701:    return "5701";
11470         case PHY_ID_BCM5703:    return "5703";
11471         case PHY_ID_BCM5704:    return "5704";
11472         case PHY_ID_BCM5705:    return "5705";
11473         case PHY_ID_BCM5750:    return "5750";
11474         case PHY_ID_BCM5752:    return "5752";
11475         case PHY_ID_BCM5714:    return "5714";
11476         case PHY_ID_BCM5780:    return "5780";
11477         case PHY_ID_BCM5755:    return "5755";
11478         case PHY_ID_BCM5787:    return "5787";
11479         case PHY_ID_BCM8002:    return "8002/serdes";
11480         case 0:                 return "serdes";
11481         default:                return "unknown";
11482         };
11483 }
11484
11485 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11486 {
11487         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11488                 strcpy(str, "PCI Express");
11489                 return str;
11490         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11491                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11492
11493                 strcpy(str, "PCIX:");
11494
11495                 if ((clock_ctrl == 7) ||
11496                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11497                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11498                         strcat(str, "133MHz");
11499                 else if (clock_ctrl == 0)
11500                         strcat(str, "33MHz");
11501                 else if (clock_ctrl == 2)
11502                         strcat(str, "50MHz");
11503                 else if (clock_ctrl == 4)
11504                         strcat(str, "66MHz");
11505                 else if (clock_ctrl == 6)
11506                         strcat(str, "100MHz");
11507         } else {
11508                 strcpy(str, "PCI:");
11509                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11510                         strcat(str, "66MHz");
11511                 else
11512                         strcat(str, "33MHz");
11513         }
11514         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11515                 strcat(str, ":32-bit");
11516         else
11517                 strcat(str, ":64-bit");
11518         return str;
11519 }
11520
11521 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11522 {
11523         struct pci_dev *peer;
11524         unsigned int func, devnr = tp->pdev->devfn & ~7;
11525
11526         for (func = 0; func < 8; func++) {
11527                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11528                 if (peer && peer != tp->pdev)
11529                         break;
11530                 pci_dev_put(peer);
11531         }
11532         /* 5704 can be configured in single-port mode, set peer to
11533          * tp->pdev in that case.
11534          */
11535         if (!peer) {
11536                 peer = tp->pdev;
11537                 return peer;
11538         }
11539
11540         /*
11541          * We don't need to keep the refcount elevated; there's no way
11542          * to remove one half of this device without removing the other
11543          */
11544         pci_dev_put(peer);
11545
11546         return peer;
11547 }
11548
11549 static void __devinit tg3_init_coal(struct tg3 *tp)
11550 {
11551         struct ethtool_coalesce *ec = &tp->coal;
11552
11553         memset(ec, 0, sizeof(*ec));
11554         ec->cmd = ETHTOOL_GCOALESCE;
11555         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11556         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11557         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11558         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11559         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11560         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11561         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11562         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11563         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11564
11565         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11566                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11567                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11568                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11569                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11570                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11571         }
11572
11573         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11574                 ec->rx_coalesce_usecs_irq = 0;
11575                 ec->tx_coalesce_usecs_irq = 0;
11576                 ec->stats_block_coalesce_usecs = 0;
11577         }
11578 }
11579
11580 static int __devinit tg3_init_one(struct pci_dev *pdev,
11581                                   const struct pci_device_id *ent)
11582 {
11583         static int tg3_version_printed = 0;
11584         unsigned long tg3reg_base, tg3reg_len;
11585         struct net_device *dev;
11586         struct tg3 *tp;
11587         int i, err, pm_cap;
11588         char str[40];
11589         u64 dma_mask, persist_dma_mask;
11590
11591         if (tg3_version_printed++ == 0)
11592                 printk(KERN_INFO "%s", version);
11593
11594         err = pci_enable_device(pdev);
11595         if (err) {
11596                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11597                        "aborting.\n");
11598                 return err;
11599         }
11600
11601         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11602                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11603                        "base address, aborting.\n");
11604                 err = -ENODEV;
11605                 goto err_out_disable_pdev;
11606         }
11607
11608         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11609         if (err) {
11610                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11611                        "aborting.\n");
11612                 goto err_out_disable_pdev;
11613         }
11614
11615         pci_set_master(pdev);
11616
11617         /* Find power-management capability. */
11618         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11619         if (pm_cap == 0) {
11620                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11621                        "aborting.\n");
11622                 err = -EIO;
11623                 goto err_out_free_res;
11624         }
11625
11626         tg3reg_base = pci_resource_start(pdev, 0);
11627         tg3reg_len = pci_resource_len(pdev, 0);
11628
11629         dev = alloc_etherdev(sizeof(*tp));
11630         if (!dev) {
11631                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11632                 err = -ENOMEM;
11633                 goto err_out_free_res;
11634         }
11635
11636         SET_MODULE_OWNER(dev);
11637 #if (LINUX_VERSION_CODE >= 0x20419)
11638         SET_NETDEV_DEV(dev, &pdev->dev);
11639 #endif
11640
11641 #if TG3_VLAN_TAG_USED
11642         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11643         dev->vlan_rx_register = tg3_vlan_rx_register;
11644         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11645 #endif
11646
11647         tp = netdev_priv(dev);
11648         tp->pdev = pdev;
11649         tp->dev = dev;
11650         tp->pm_cap = pm_cap;
11651         tp->mac_mode = TG3_DEF_MAC_MODE;
11652         tp->rx_mode = TG3_DEF_RX_MODE;
11653         tp->tx_mode = TG3_DEF_TX_MODE;
11654         tp->mi_mode = MAC_MI_MODE_BASE;
11655         if (tg3_debug > 0)
11656                 tp->msg_enable = tg3_debug;
11657         else
11658                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11659
11660         /* The word/byte swap controls here control register access byte
11661          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11662          * setting below.
11663          */
11664         tp->misc_host_ctrl =
11665                 MISC_HOST_CTRL_MASK_PCI_INT |
11666                 MISC_HOST_CTRL_WORD_SWAP |
11667                 MISC_HOST_CTRL_INDIR_ACCESS |
11668                 MISC_HOST_CTRL_PCISTATE_RW;
11669
11670         /* The NONFRM (non-frame) byte/word swap controls take effect
11671          * on descriptor entries, anything which isn't packet data.
11672          *
11673          * The StrongARM chips on the board (one for tx, one for rx)
11674          * are running in big-endian mode.
11675          */
11676         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11677                         GRC_MODE_WSWAP_NONFRM_DATA);
11678 #ifdef __BIG_ENDIAN
11679         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11680 #endif
11681         spin_lock_init(&tp->lock);
11682         spin_lock_init(&tp->indirect_lock);
11683         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11684
11685         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11686         if (tp->regs == 0UL) {
11687                 printk(KERN_ERR PFX "Cannot map device registers, "
11688                        "aborting.\n");
11689                 err = -ENOMEM;
11690                 goto err_out_free_dev;
11691         }
11692
11693         tg3_init_link_config(tp);
11694
11695         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11696         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11697         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11698
11699         dev->open = tg3_open;
11700         dev->stop = tg3_close;
11701         dev->get_stats = tg3_get_stats;
11702         dev->set_multicast_list = tg3_set_rx_mode;
11703         dev->set_mac_address = tg3_set_mac_addr;
11704         dev->do_ioctl = tg3_ioctl;
11705         dev->tx_timeout = tg3_tx_timeout;
11706         dev->poll = tg3_poll;
11707         dev->ethtool_ops = &tg3_ethtool_ops;
11708         dev->weight = 64;
11709         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11710         dev->change_mtu = tg3_change_mtu;
11711         dev->irq = pdev->irq;
11712 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11713         dev->poll_controller = tg3_poll_controller;
11714 #endif
11715
11716         err = tg3_get_invariants(tp);
11717         if (err) {
11718                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11719                        "aborting.\n");
11720                 goto err_out_iounmap;
11721         }
11722
11723         /* The EPB bridge inside 5714, 5715, and 5780 and any
11724          * device behind the EPB cannot support DMA addresses > 40-bit.
11725          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11726          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11727          * do DMA address check in tg3_start_xmit().
11728          */
11729         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11730                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11731         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11732                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11733 #ifdef CONFIG_HIGHMEM
11734                 dma_mask = DMA_64BIT_MASK;
11735 #endif
11736         } else
11737                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11738
11739         /* Configure DMA attributes. */
11740         if (dma_mask > DMA_32BIT_MASK) {
11741                 err = pci_set_dma_mask(pdev, dma_mask);
11742                 if (!err) {
11743                         dev->features |= NETIF_F_HIGHDMA;
11744                         err = pci_set_consistent_dma_mask(pdev,
11745                                                           persist_dma_mask);
11746                         if (err < 0) {
11747                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11748                                        "DMA for consistent allocations\n");
11749                                 goto err_out_iounmap;
11750                         }
11751                 }
11752         }
11753         if (err || dma_mask == DMA_32BIT_MASK) {
11754                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11755                 if (err) {
11756                         printk(KERN_ERR PFX "No usable DMA configuration, "
11757                                "aborting.\n");
11758                         goto err_out_iounmap;
11759                 }
11760         }
11761
11762         tg3_init_bufmgr_config(tp);
11763
11764 #if TG3_TSO_SUPPORT != 0
11765         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11766                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11767         }
11768         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11769             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11770             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11771             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11772                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11773         } else {
11774                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11775         }
11776
11777         /* TSO is on by default on chips that support hardware TSO.
11778          * Firmware TSO on older chips gives lower performance, so it
11779          * is off by default, but can be enabled using ethtool.
11780          */
11781         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11782                 dev->features |= NETIF_F_TSO;
11783                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11784                         dev->features |= NETIF_F_TSO6;
11785         }
11786
11787 #endif
11788
11789         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11790             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11791             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11792                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11793                 tp->rx_pending = 63;
11794         }
11795
11796         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11797             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11798                 tp->pdev_peer = tg3_find_peer(tp);
11799
11800         err = tg3_get_device_address(tp);
11801         if (err) {
11802                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11803                        "aborting.\n");
11804                 goto err_out_iounmap;
11805         }
11806
11807         /*
11808          * Reset chip in case UNDI or EFI driver did not shutdown
11809          * DMA self test will enable WDMAC and we'll see (spurious)
11810          * pending DMA on the PCI bus at that point.
11811          */
11812         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11813             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11814 #if (LINUX_VERSION_CODE < 0x2060a)
11815                 pci_save_state(tp->pdev, tp->pci_cfg_state);
11816 #else
11817                 pci_save_state(tp->pdev);
11818 #endif
11819                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11820                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11821         }
11822
11823         err = tg3_test_dma(tp);
11824         if (err) {
11825                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11826                 goto err_out_iounmap;
11827         }
11828
11829         /* Tigon3 can do ipv4 only... and some chips have buggy
11830          * checksumming.
11831          */
11832         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11833                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11834                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11835                         dev->features |= NETIF_F_HW_CSUM;
11836                 else
11837                         dev->features |= NETIF_F_IP_CSUM;
11838                 dev->features |= NETIF_F_SG;
11839                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11840         } else
11841                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11842
11843         /* flow control autonegotiation is default behavior */
11844         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11845
11846         tg3_init_coal(tp);
11847
11848         /* Now that we have fully setup the chip, save away a snapshot
11849          * of the PCI config space.  We need to restore this after
11850          * GRC_MISC_CFG core clock resets and some resume events.
11851          */
11852 #if (LINUX_VERSION_CODE < 0x2060a)
11853         pci_save_state(tp->pdev, tp->pci_cfg_state);
11854 #else
11855         pci_save_state(tp->pdev);
11856 #endif
11857
11858         err = register_netdev(dev);
11859         if (err) {
11860                 printk(KERN_ERR PFX "Cannot register net device, "
11861                        "aborting.\n");
11862                 goto err_out_iounmap;
11863         }
11864
11865         pci_set_drvdata(pdev, dev);
11866
11867         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11868                dev->name,
11869                tp->board_part_number,
11870                tp->pci_chip_rev_id,
11871                tg3_phy_string(tp),
11872                tg3_bus_string(tp, str),
11873                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11874
11875         for (i = 0; i < 6; i++)
11876                 printk("%2.2x%c", dev->dev_addr[i],
11877                        i == 5 ? '\n' : ':');
11878
11879         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11880                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11881                "TSOcap[%d] \n",
11882                dev->name,
11883                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11884                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11885                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11886                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11887                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11888                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11889                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11890         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11891                dev->name, tp->dma_rwctrl,
11892                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11893                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11894
11895         netif_carrier_off(tp->dev);
11896
11897         return 0;
11898
11899 err_out_iounmap:
11900         if (tp->regs) {
11901                 iounmap(tp->regs);
11902                 tp->regs = NULL;
11903         }
11904
11905 err_out_free_dev:
11906 #if (LINUX_VERSION_CODE >= 0x20418)
11907         free_netdev(dev);
11908 #else
11909         kfree(dev);
11910 #endif
11911
11912 err_out_free_res:
11913         pci_release_regions(pdev);
11914
11915 err_out_disable_pdev:
11916         pci_disable_device(pdev);
11917         pci_set_drvdata(pdev, NULL);
11918         return err;
11919 }
11920
11921 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11922 {
11923         struct net_device *dev = pci_get_drvdata(pdev);
11924
11925         if (dev) {
11926                 struct tg3 *tp = netdev_priv(dev);
11927
11928 #if (LINUX_VERSION_CODE >= 0x20600)
11929                 flush_scheduled_work();
11930 #endif
11931                 unregister_netdev(dev);
11932                 if (tp->regs) {
11933                         iounmap(tp->regs);
11934                         tp->regs = NULL;
11935                 }
11936 #if (LINUX_VERSION_CODE >= 0x20418)
11937                 free_netdev(dev);
11938 #else
11939                 kfree(dev);
11940 #endif
11941                 pci_release_regions(pdev);
11942                 pci_disable_device(pdev);
11943                 pci_set_drvdata(pdev, NULL);
11944         }
11945 }
11946
/* PM suspend hook.  Quiesces the interface (NAPI/queue stop, watchdog
 * timer, interrupts), detaches the net device, halts the chip, and
 * requests the low-power state.  On failure to change power state the
 * hardware is restarted so networking keeps working.
 *
 * Pre-2.6.11 kernels pass the raw u32 state; later kernels pass a
 * pm_message_t that must be mapped via pci_choose_state().
 */
#if (LINUX_VERSION_CODE < 0x2060b)
static int tg3_suspend(struct pci_dev *pdev, u32 state)
#else
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
#endif
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* A closed interface holds no live hardware state to save. */
	if (!netif_running(dev))
		return 0;

#if (LINUX_VERSION_CODE >= 0x20600)
	/* Wait out any pending reset_task before touching the chip. */
	flush_scheduled_work();
#endif
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: also synchronize with the interrupt handler. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark init as undone before powering down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

#if (LINUX_VERSION_CODE < 0x2060b)
	err = tg3_set_power_state(tp, state);
#else
	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
#endif
	if (err) {
		/* Power transition failed: bring the device back up so
		 * the interface remains usable; err is still returned.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12002
/* PM resume hook: restore the saved PCI config space, return the chip
 * to full power (D0), and re-initialize the hardware if the interface
 * was running at suspend time.  Returns 0 or a negative errno from the
 * power-state change / hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* If the interface was down, suspend did nothing to undo. */
	if (!netif_running(dev))
		return 0;

	/* Pre-2.6.10 kernels take an explicit saved-state buffer. */
#if (LINUX_VERSION_CODE < 0x2060a)
	pci_restore_state(tp->pdev, tp->pci_cfg_state);
#else
	pci_restore_state(tp->pdev);
#endif

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Re-run chip init; only restart the timer and queues on success. */
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12041
/* PCI driver glue: probe/remove plus power-management entry points,
 * matched against the device IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12050
12051 static int __init tg3_init(void)
12052 {
12053         return pci_module_init(&tg3_driver);
12054 }
12055
/* Module exit point: detach the driver from the PCI core, which in
 * turn invokes tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12060
/* Register the module's load/unload handlers with the kernel. */
module_init(tg3_init);
module_exit(tg3_cleanup);