1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  */
8
9 #include <linux/config.h>
10
11 #include <linux/module.h>
12
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/compiler.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/pci.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/mii.h>
26 #include <linux/if_vlan.h>
27 #include <linux/ip.h>
28 #include <linux/tcp.h>
29 #include <linux/workqueue.h>
30
31 #include <net/checksum.h>
32
33 #include <asm/system.h>
34 #include <asm/io.h>
35 #include <asm/byteorder.h>
36 #include <asm/uaccess.h>
37
38 #ifdef CONFIG_SPARC64
39 #include <asm/idprom.h>
40 #include <asm/oplib.h>
41 #include <asm/pbm.h>
42 #endif
43
44 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
45 #define TG3_VLAN_TAG_USED 1
46 #else
47 #define TG3_VLAN_TAG_USED 0
48 #endif
49
50 #ifdef NETIF_F_TSO
51 #define TG3_TSO_SUPPORT 1
52 #else
53 #define TG3_TSO_SUPPORT 0
54 #endif
55
56 #include "tg3.h"
57
58 #define DRV_MODULE_NAME         "tg3"
59 #define PFX DRV_MODULE_NAME     ": "
60 #define DRV_MODULE_VERSION      "3.8"
61 #define DRV_MODULE_RELDATE      "July 14, 2004"
62
63 #define TG3_DEF_MAC_MODE        0
64 #define TG3_DEF_RX_MODE         0
65 #define TG3_DEF_TX_MODE         0
66 #define TG3_DEF_MSG_ENABLE        \
67         (NETIF_MSG_DRV          | \
68          NETIF_MSG_PROBE        | \
69          NETIF_MSG_LINK         | \
70          NETIF_MSG_TIMER        | \
71          NETIF_MSG_IFDOWN       | \
72          NETIF_MSG_IFUP         | \
73          NETIF_MSG_RX_ERR       | \
74          NETIF_MSG_TX_ERR)
75
76 /* length of time before we decide the hardware is borked,
77  * and dev->tx_timeout() should be called to fix the problem
78  */
79 #define TG3_TX_TIMEOUT                  (5 * HZ)
80
81 /* hardware minimum and maximum for a single frame's data payload */
82 #define TG3_MIN_MTU                     60
83 #define TG3_MAX_MTU(tp) \
84         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
85           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
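/* Note: the 5705 and 5750 families are capped at the standard 1500 byte
 * MTU here, presumably because this driver only wires up the jumbo RX
 * ring on the older chips; everything else is allowed up to 9000 bytes.
 */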
86
87 /* These numbers seem to be hard coded in the NIC firmware somehow.
88  * You can't change the ring sizes, but you can change where you place
89  * them in the NIC onboard memory.
90  */
91 #define TG3_RX_RING_SIZE                512
92 #define TG3_DEF_RX_RING_PENDING         200
93 #define TG3_RX_JUMBO_RING_SIZE          256
94 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
95
96 /* Do not place this n-ring entries value into the tp struct itself,
97  * we really want to expose these constants to GCC so that modulo et
98  * al.  operations are done with shifts and masks instead of with
99  * hw multiply/modulo instructions.  Another solution would be to
100  * replace things like '% foo' with '& (foo - 1)'.
101  */
102 #define TG3_RX_RCB_RING_SIZE(tp)        \
103         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
104           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
105          512 : 1024)
106
107 #define TG3_TX_RING_SIZE                512
108 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
109
110 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
111                                  TG3_RX_RING_SIZE)
112 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
113                                  TG3_RX_JUMBO_RING_SIZE)
114 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
115                                    TG3_RX_RCB_RING_SIZE(tp))
116 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
117                                  TG3_TX_RING_SIZE)
118 #define TX_RING_GAP(TP) \
119         (TG3_TX_RING_SIZE - (TP)->tx_pending)
120 #define TX_BUFFS_AVAIL(TP)                                              \
121         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
122           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
123           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
124 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
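/* A worked instance of the '& (foo - 1)' trick mentioned above: with the
 * 512-entry TX ring, stepping past the last slot gives
 *
 *     (511 + 1) % 512       == 0
 *     (511 + 1) & (512 - 1) == 0
 *
 * and the mask form used by NEXT_TX() costs a single AND instead of a
 * divide.  TX_BUFFS_AVAIL() reports how many descriptors may still be
 * queued while keeping the TX_RING_GAP() slots in reserve so the
 * producer never catches up with the consumer.
 */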
125
126 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
128
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
131
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_PARM(tg3_debug, "i");
142 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
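/* When built as a module, the default mask can be overridden at load
 * time with something like "modprobe tg3 tg3_debug=0x7fff"; the value
 * (that one is purely illustrative) is a bitmask of NETIF_MSG_* flags,
 * with TG3_DEF_MSG_ENABLE above being the default set.
 */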
145
146 static struct pci_device_id tg3_pci_tbl[] = {
147         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
148           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { 0, }
220 };
221
222 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
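/* The PCI ID table above is what makes autoloading work: it is exported
 * through MODULE_DEVICE_TABLE() for the module tools, and the same table
 * is normally handed to the PCI core via the driver's .id_table so the
 * core knows which Broadcom, SysKonnect, Altima and Apple devices to
 * offer to the probe routine.
 */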
223
224 static struct {
225         char string[ETH_GSTRING_LEN];
226 } ethtool_stats_keys[TG3_NUM_STATS] = {
227         { "rx_octets" },
228         { "rx_fragments" },
229         { "rx_ucast_packets" },
230         { "rx_mcast_packets" },
231         { "rx_bcast_packets" },
232         { "rx_fcs_errors" },
233         { "rx_align_errors" },
234         { "rx_xon_pause_rcvd" },
235         { "rx_xoff_pause_rcvd" },
236         { "rx_mac_ctrl_rcvd" },
237         { "rx_xoff_entered" },
238         { "rx_frame_too_long_errors" },
239         { "rx_jabbers" },
240         { "rx_undersize_packets" },
241         { "rx_in_length_errors" },
242         { "rx_out_length_errors" },
243         { "rx_64_or_less_octet_packets" },
244         { "rx_65_to_127_octet_packets" },
245         { "rx_128_to_255_octet_packets" },
246         { "rx_256_to_511_octet_packets" },
247         { "rx_512_to_1023_octet_packets" },
248         { "rx_1024_to_1522_octet_packets" },
249         { "rx_1523_to_2047_octet_packets" },
250         { "rx_2048_to_4095_octet_packets" },
251         { "rx_4096_to_8191_octet_packets" },
252         { "rx_8192_to_9022_octet_packets" },
253
254         { "tx_octets" },
255         { "tx_collisions" },
256
257         { "tx_xon_sent" },
258         { "tx_xoff_sent" },
259         { "tx_flow_control" },
260         { "tx_mac_errors" },
261         { "tx_single_collisions" },
262         { "tx_mult_collisions" },
263         { "tx_deferred" },
264         { "tx_excessive_collisions" },
265         { "tx_late_collisions" },
266         { "tx_collide_2times" },
267         { "tx_collide_3times" },
268         { "tx_collide_4times" },
269         { "tx_collide_5times" },
270         { "tx_collide_6times" },
271         { "tx_collide_7times" },
272         { "tx_collide_8times" },
273         { "tx_collide_9times" },
274         { "tx_collide_10times" },
275         { "tx_collide_11times" },
276         { "tx_collide_12times" },
277         { "tx_collide_13times" },
278         { "tx_collide_14times" },
279         { "tx_collide_15times" },
280         { "tx_ucast_packets" },
281         { "tx_mcast_packets" },
282         { "tx_bcast_packets" },
283         { "tx_carrier_sense_errors" },
284         { "tx_discards" },
285         { "tx_errors" },
286
287         { "dma_writeq_full" },
288         { "dma_write_prioq_full" },
289         { "rxbds_empty" },
290         { "rx_discards" },
291         { "rx_errors" },
292         { "rx_threshold_hit" },
293
294         { "dma_readq_full" },
295         { "dma_read_prioq_full" },
296         { "tx_comp_queue_full" },
297
298         { "ring_set_send_prod_index" },
299         { "ring_status_update" },
300         { "nic_irqs" },
301         { "nic_avoided_irqs" },
302         { "nic_tx_threshold_hit" }
303 };
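/* These strings are what "ethtool -S" prints; the array is sized by
 * TG3_NUM_STATS and is expected to line up one-for-one with the u64
 * counters the driver returns for ETHTOOL_GSTATS.
 */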
304
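/* Register access helpers.  On chips with the PCI-X target hardware bug
 * (TG3_FLAG_PCIX_TARGET_HWBUG) plain MMIO writes are avoided; the offset
 * and value are instead pushed through the TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA window in PCI config space under indirect_lock.  The
 * 5701 register-write bug is worked around by reading the register back
 * to flush the posted write.
 */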
305 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
306 {
307         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
308                 unsigned long flags;
309
310                 spin_lock_irqsave(&tp->indirect_lock, flags);
311                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
312                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
313                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
314         } else {
315                 writel(val, tp->regs + off);
316                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
317                         readl(tp->regs + off);
318         }
319 }
320
321 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
322 {
323         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
324                 unsigned long flags;
325
326                 spin_lock_irqsave(&tp->indirect_lock, flags);
327                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
328                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
329                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
330         } else {
331                 unsigned long dest = tp->regs + off;
332                 writel(val, dest);
333                 readl(dest);    /* always flush PCI write */
334         }
335 }
336
337 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
338 {
339         unsigned long mbox = tp->regs + off;
340         writel(val, mbox);
341         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
342                 readl(mbox);
343 }
344
345 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
346 {
347         unsigned long mbox = tp->regs + off;
348         writel(val, mbox);
349         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
350                 writel(val, mbox);
351         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
352                 readl(mbox);
353 }
354
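/* Shorthand accessors used throughout the driver: tw32()/tr32() and
 * friends operate on the MMIO window at tp->regs (or take the indirect
 * config-space path above), tw32_f() additionally flushes the write,
 * and the mailbox variants apply the TX/RX mailbox workarounds.
 */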
355 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
356 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
357 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
358
359 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
360 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
361 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
362 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
363 #define tr32(reg)               readl(tp->regs + (reg))
364 #define tr16(reg)               readw(tp->regs + (reg))
365 #define tr8(reg)                readb(tp->regs + (reg))
366
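/* NIC-internal memory goes through a second indirection: the target
 * offset is loaded into TG3PCI_MEM_WIN_BASE_ADDR and the data is moved
 * via TG3PCI_MEM_WIN_DATA, again serialized by indirect_lock, with the
 * window base parked back at zero afterwards.
 */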
367 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
368 {
369         unsigned long flags;
370
371         spin_lock_irqsave(&tp->indirect_lock, flags);
372         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
373         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
374
375         /* Always leave this as zero. */
376         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
377         spin_unlock_irqrestore(&tp->indirect_lock, flags);
378 }
379
380 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
381 {
382         unsigned long flags;
383
384         spin_lock_irqsave(&tp->indirect_lock, flags);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
386         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
387
388         /* Always leave this as zero. */
389         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
390         spin_unlock_irqrestore(&tp->indirect_lock, flags);
391 }
392
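/* Interrupts are masked in two places at once: MISC_HOST_CTRL's
 * MASK_PCI_INT bit gates the PCI interrupt line, and writing 1 (or 0)
 * to the low word of MAILBOX_INTERRUPT_0 tells the chip that the host
 * is (or is no longer) holding interrupts off.  tg3_cond_int() then
 * uses GRC_LCLCTRL_SETINT so a status update that arrived while masked
 * still generates an interrupt once they are re-enabled.
 */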
393 static void tg3_disable_ints(struct tg3 *tp)
394 {
395         tw32(TG3PCI_MISC_HOST_CTRL,
396              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
397         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
398         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
399 }
400
401 static inline void tg3_cond_int(struct tg3 *tp)
402 {
403         if (tp->hw_status->status & SD_STATUS_UPDATED)
404                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
405 }
406
407 static void tg3_enable_ints(struct tg3 *tp)
408 {
409         tw32(TG3PCI_MISC_HOST_CTRL,
410              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
411         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
412         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
413
414         tg3_cond_int(tp);
415 }
416
417 static inline void tg3_netif_stop(struct tg3 *tp)
418 {
419         netif_poll_disable(tp->dev);
420         netif_tx_disable(tp->dev);
421 }
422
423 static inline void tg3_netif_start(struct tg3 *tp)
424 {
425         netif_wake_queue(tp->dev);
426         /* NOTE: unconditional netif_wake_queue is only appropriate
427          * so long as all callers are assured to have free tx slots
428          * (such as after tg3_init_hw)
429          */
430         netif_poll_enable(tp->dev);
431         tg3_cond_int(tp);
432 }
433
434 static void tg3_switch_clocks(struct tg3 *tp)
435 {
436         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
437         u32 orig_clock_ctrl;
438
439         orig_clock_ctrl = clock_ctrl;
440         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
441                        CLOCK_CTRL_CLKRUN_OENABLE |
442                        0x1f);
443         tp->pci_clock_ctrl = clock_ctrl;
444
445         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
446             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
447             (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
448                 tw32_f(TG3PCI_CLOCK_CTRL,
449                      clock_ctrl |
450                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
451                 udelay(40);
452                 tw32_f(TG3PCI_CLOCK_CTRL,
453                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
454                 udelay(40);
455         }
456         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
457         udelay(40);
458 }
459
460 #define PHY_BUSY_LOOPS  5000
461
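/* MII/MDIO access: a transaction is built in MAC_MI_COM (PHY address,
 * register, command, data) and MI_COM_BUSY is polled for up to
 * PHY_BUSY_LOOPS iterations.  Hardware autopolling is switched off for
 * the duration so it cannot race with the hand-rolled frame.
 */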
462 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
463 {
464         u32 frame_val;
465         int loops, ret;
466
467         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
468                 tw32_f(MAC_MI_MODE,
469                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
470                 udelay(80);
471         }
472
473         *val = 0xffffffff;
474
475         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
476                       MI_COM_PHY_ADDR_MASK);
477         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
478                       MI_COM_REG_ADDR_MASK);
479         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
480         
481         tw32_f(MAC_MI_COM, frame_val);
482
483         loops = PHY_BUSY_LOOPS;
484         while (loops-- > 0) {
485                 udelay(10);
486                 frame_val = tr32(MAC_MI_COM);
487
488                 if ((frame_val & MI_COM_BUSY) == 0) {
489                         udelay(5);
490                         frame_val = tr32(MAC_MI_COM);
491                         break;
492                 }
493         }
494
495         ret = -EBUSY;
496         if (loops > 0) {
497                 *val = frame_val & MI_COM_DATA_MASK;
498                 ret = 0;
499         }
500
501         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
502                 tw32_f(MAC_MI_MODE, tp->mi_mode);
503                 udelay(80);
504         }
505
506         return ret;
507 }
508
509 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
510 {
511         u32 frame_val;
512         int loops, ret;
513
514         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
515                 tw32_f(MAC_MI_MODE,
516                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
517                 udelay(80);
518         }
519
520         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
521                       MI_COM_PHY_ADDR_MASK);
522         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
523                       MI_COM_REG_ADDR_MASK);
524         frame_val |= (val & MI_COM_DATA_MASK);
525         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
526         
527         tw32_f(MAC_MI_COM, frame_val);
528
529         loops = PHY_BUSY_LOOPS;
530         while (loops-- > 0) {
531                 udelay(10);
532                 frame_val = tr32(MAC_MI_COM);
533                 if ((frame_val & MI_COM_BUSY) == 0) {
534                         udelay(5);
535                         frame_val = tr32(MAC_MI_COM);
536                         break;
537                 }
538         }
539
540         ret = -EBUSY;
541         if (loops > 0)
542                 ret = 0;
543
544         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
545                 tw32_f(MAC_MI_MODE, tp->mi_mode);
546                 udelay(80);
547         }
548
549         return ret;
550 }
551
552 static void tg3_phy_set_wirespeed(struct tg3 *tp)
553 {
554         u32 val;
555
556         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
557                 return;
558
559         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
560         tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
561         tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
562 }
563
564 static int tg3_bmcr_reset(struct tg3 *tp)
565 {
566         u32 phy_control;
567         int limit, err;
568
569         /* OK, reset it, and poll the BMCR_RESET bit until it
570          * clears or we time out.
571          */
572         phy_control = BMCR_RESET;
573         err = tg3_writephy(tp, MII_BMCR, phy_control);
574         if (err != 0)
575                 return -EBUSY;
576
577         limit = 5000;
578         while (limit--) {
579                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
580                 if (err != 0)
581                         return -EBUSY;
582
583                 if ((phy_control & BMCR_RESET) == 0) {
584                         udelay(40);
585                         break;
586                 }
587                 udelay(10);
588         }
589         if (limit <= 0)
590                 return -EBUSY;
591
592         return 0;
593 }
594
595 static int tg3_wait_macro_done(struct tg3 *tp)
596 {
597         int limit = 100;
598
599         while (limit--) {
600                 u32 tmp32;
601
602                 tg3_readphy(tp, 0x16, &tmp32);
603                 if ((tmp32 & 0x1000) == 0)
604                         break;
605         }
606         if (limit <= 0)
607                 return -EBUSY;
608
609         return 0;
610 }
611
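/* Part of the 5703/4/5 PHY reset sequence: known test patterns are
 * written to each of the four DSP channels (address chan * 0x2000 +
 * 0x0200) and read back.  A mismatch, or register 0x16 never signalling
 * completion, makes the attempt fail so the caller retries
 * (re-resetting the PHY first in the timeout case).
 */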
612 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
613 {
614         static const u32 test_pat[4][6] = {
615         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
616         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
617         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
618         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
619         };
620         int chan;
621
622         for (chan = 0; chan < 4; chan++) {
623                 int i;
624
625                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
626                              (chan * 0x2000) | 0x0200);
627                 tg3_writephy(tp, 0x16, 0x0002);
628
629                 for (i = 0; i < 6; i++)
630                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
631                                      test_pat[chan][i]);
632
633                 tg3_writephy(tp, 0x16, 0x0202);
634                 if (tg3_wait_macro_done(tp)) {
635                         *resetp = 1;
636                         return -EBUSY;
637                 }
638
639                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
640                              (chan * 0x2000) | 0x0200);
641                 tg3_writephy(tp, 0x16, 0x0082);
642                 if (tg3_wait_macro_done(tp)) {
643                         *resetp = 1;
644                         return -EBUSY;
645                 }
646
647                 tg3_writephy(tp, 0x16, 0x0802);
648                 if (tg3_wait_macro_done(tp)) {
649                         *resetp = 1;
650                         return -EBUSY;
651                 }
652
653                 for (i = 0; i < 6; i += 2) {
654                         u32 low, high;
655
656                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
657                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
658                         if (tg3_wait_macro_done(tp)) {
659                                 *resetp = 1;
660                                 return -EBUSY;
661                         }
662                         low &= 0x7fff;
663                         high &= 0x000f;
664                         if (low != test_pat[chan][i] ||
665                             high != test_pat[chan][i+1]) {
666                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
667                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
668                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
669
670                                 return -EBUSY;
671                         }
672                 }
673         }
674
675         return 0;
676 }
677
678 static int tg3_phy_reset_chanpat(struct tg3 *tp)
679 {
680         int chan;
681
682         for (chan = 0; chan < 4; chan++) {
683                 int i;
684
685                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
686                              (chan * 0x2000) | 0x0200);
687                 tg3_writephy(tp, 0x16, 0x0002);
688                 for (i = 0; i < 6; i++)
689                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
690                 tg3_writephy(tp, 0x16, 0x0202);
691                 if (tg3_wait_macro_done(tp))
692                         return -EBUSY;
693         }
694
695         return 0;
696 }
697
698 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
699 {
700         u32 reg32, phy9_orig;
701         int retries, do_phy_reset, err;
702
703         retries = 10;
704         do_phy_reset = 1;
705         do {
706                 if (do_phy_reset) {
707                         err = tg3_bmcr_reset(tp);
708                         if (err)
709                                 return err;
710                         do_phy_reset = 0;
711                 }
712
713                 /* Disable transmitter and interrupt.  */
714                 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
715                 reg32 |= 0x3000;
716                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
717
718                 /* Set full-duplex, 1000 mbps.  */
719                 tg3_writephy(tp, MII_BMCR,
720                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
721
722                 /* Set to master mode.  */
723                 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
724                 tg3_writephy(tp, MII_TG3_CTRL,
725                              (MII_TG3_CTRL_AS_MASTER |
726                               MII_TG3_CTRL_ENABLE_AS_MASTER));
727
728                 /* Enable SM_DSP_CLOCK and 6dB.  */
729                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
730
731                 /* Block the PHY control access.  */
732                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
733                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
734
735                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
736                 if (!err)
737                         break;
738         } while (--retries);
739
740         err = tg3_phy_reset_chanpat(tp);
741         if (err)
742                 return err;
743
744         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
745         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
746
747         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
748         tg3_writephy(tp, 0x16, 0x0000);
749
750         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
751             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
752                 /* Set Extended packet length bit for jumbo frames */
753                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
754         }
755         else {
756                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
757         }
758
759         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
760
761         tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
762         reg32 &= ~0x3000;
763         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
764
765         return err;
766 }
767
768 /* Unconditionally reset the tigon3 PHY and reapply the workarounds a
769  * reset wipes out; the caller decides when a reset is actually needed.
770  */
771 static int tg3_phy_reset(struct tg3 *tp)
772 {
773         u32 phy_status;
774         int err;
775
776         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
777         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
778         if (err != 0)
779                 return -EBUSY;
780
781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
782             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
783             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
784                 err = tg3_phy_reset_5703_4_5(tp);
785                 if (err)
786                         return err;
787                 goto out;
788         }
789
790         err = tg3_bmcr_reset(tp);
791         if (err)
792                 return err;
793
794 out:
795         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
797                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
798                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
799                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
800                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
801                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
802         }
803         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
804                 tg3_writephy(tp, 0x1c, 0x8d68);
805                 tg3_writephy(tp, 0x1c, 0x8d68);
806         }
807         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
808                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
809                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
810                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
811                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
812                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
813                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
814                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
815                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
816         }
817         /* Set Extended packet length bit (bit 14) on all chips that */
818         /* support jumbo frames */
819         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
820                 /* Cannot do read-modify-write on 5401 */
821                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
822         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
823                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
824                 u32 phy_reg;
825
826                 /* Set bit 14 with read-modify-write to preserve other bits */
827                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
828                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
829                 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
830         }
831         tg3_phy_set_wirespeed(tp);
832         return 0;
833 }
834
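/* "Frobbing" the aux power: on boards that drive an auxiliary power
 * source from the GPIO pins (notably dual-port 5704 cards, where the
 * peer device has to be consulted), the GPIO output enables appear to
 * be sequenced so that aux power is held up while either port has
 * wake-on-LAN enabled, and released otherwise.
 */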
835 static void tg3_frob_aux_power(struct tg3 *tp)
836 {
837         struct tg3 *tp_peer = tp;
838
839         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
840                 return;
841
842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
843                 tp_peer = pci_get_drvdata(tp->pdev_peer);
844                 if (!tp_peer)
845                         BUG();
846         }
847
848
849         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
850             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
851                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
852                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
853                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
854                              (GRC_LCLCTRL_GPIO_OE0 |
855                               GRC_LCLCTRL_GPIO_OE1 |
856                               GRC_LCLCTRL_GPIO_OE2 |
857                               GRC_LCLCTRL_GPIO_OUTPUT0 |
858                               GRC_LCLCTRL_GPIO_OUTPUT1));
859                         udelay(100);
860                 } else {
861                         if (tp_peer != tp &&
862                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
863                                 return;
864
865                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
866                              (GRC_LCLCTRL_GPIO_OE0 |
867                               GRC_LCLCTRL_GPIO_OE1 |
868                               GRC_LCLCTRL_GPIO_OE2 |
869                               GRC_LCLCTRL_GPIO_OUTPUT1 |
870                               GRC_LCLCTRL_GPIO_OUTPUT2));
871                         udelay(100);
872
873                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
874                              (GRC_LCLCTRL_GPIO_OE0 |
875                               GRC_LCLCTRL_GPIO_OE1 |
876                               GRC_LCLCTRL_GPIO_OE2 |
877                               GRC_LCLCTRL_GPIO_OUTPUT0 |
878                               GRC_LCLCTRL_GPIO_OUTPUT1 |
879                               GRC_LCLCTRL_GPIO_OUTPUT2));
880                         udelay(100);
881
882                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
883                              (GRC_LCLCTRL_GPIO_OE0 |
884                               GRC_LCLCTRL_GPIO_OE1 |
885                               GRC_LCLCTRL_GPIO_OE2 |
886                               GRC_LCLCTRL_GPIO_OUTPUT0 |
887                               GRC_LCLCTRL_GPIO_OUTPUT1));
888                         udelay(100);
889                 }
890         } else {
891                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
892                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
893                         if (tp_peer != tp &&
894                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
895                                 return;
896
897                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
898                              (GRC_LCLCTRL_GPIO_OE1 |
899                               GRC_LCLCTRL_GPIO_OUTPUT1));
900                         udelay(100);
901
902                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
903                              (GRC_LCLCTRL_GPIO_OE1));
904                         udelay(100);
905
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE1 |
908                               GRC_LCLCTRL_GPIO_OUTPUT1));
909                         udelay(100);
910                 }
911         }
912 }
913
914 static int tg3_setup_phy(struct tg3 *, int);
915
916 #define RESET_KIND_SHUTDOWN     0
917 #define RESET_KIND_INIT         1
918 #define RESET_KIND_SUSPEND      2
919
920 static void tg3_write_sig_post_reset(struct tg3 *, int);
921
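/* Power management: the 'state' argument below maps onto the PCI power
 * states (0 = D0 fully on, 3 = D3hot) written through the device's
 * PCI_PM_CTRL register.  Roughly: before dropping into a low-power
 * state the copper link is renegotiated down to 10/half, the core
 * clocks are slowed, and magic-packet matching is armed when
 * TG3_FLAG_WOL_ENABLE is set.
 */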
922 static int tg3_set_power_state(struct tg3 *tp, int state)
923 {
924         u32 misc_host_ctrl;
925         u16 power_control, power_caps;
926         int pm = tp->pm_cap;
927
928         /* Make sure register accesses (indirect or otherwise)
929          * will function correctly.
930          */
931         pci_write_config_dword(tp->pdev,
932                                TG3PCI_MISC_HOST_CTRL,
933                                tp->misc_host_ctrl);
934
935         pci_read_config_word(tp->pdev,
936                              pm + PCI_PM_CTRL,
937                              &power_control);
938         power_control |= PCI_PM_CTRL_PME_STATUS;
939         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
940         switch (state) {
941         case 0:
942                 power_control |= 0;
943                 pci_write_config_word(tp->pdev,
944                                       pm + PCI_PM_CTRL,
945                                       power_control);
946                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
947                 udelay(100);
948
949                 return 0;
950
951         case 1:
952                 power_control |= 1;
953                 break;
954
955         case 2:
956                 power_control |= 2;
957                 break;
958
959         case 3:
960                 power_control |= 3;
961                 break;
962
963         default:
964                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
965                        "requested.\n",
966                        tp->dev->name, state);
967                 return -EINVAL;
968         };
969
970         power_control |= PCI_PM_CTRL_PME_ENABLE;
971
972         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
973         tw32(TG3PCI_MISC_HOST_CTRL,
974              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
975
976         if (tp->link_config.phy_is_low_power == 0) {
977                 tp->link_config.phy_is_low_power = 1;
978                 tp->link_config.orig_speed = tp->link_config.speed;
979                 tp->link_config.orig_duplex = tp->link_config.duplex;
980                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
981         }
982
983         if (tp->phy_id != PHY_ID_SERDES) {
984                 tp->link_config.speed = SPEED_10;
985                 tp->link_config.duplex = DUPLEX_HALF;
986                 tp->link_config.autoneg = AUTONEG_ENABLE;
987                 tg3_setup_phy(tp, 0);
988         }
989
990         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
991
992         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
993                 u32 mac_mode;
994
995                 if (tp->phy_id != PHY_ID_SERDES) {
996                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
997                         udelay(40);
998
999                         mac_mode = MAC_MODE_PORT_MODE_MII;
1000
1001                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1002                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1003                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1004                 } else {
1005                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1006                 }
1007
1008                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1009                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1010
1011                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1012                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1013                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1014
1015                 tw32_f(MAC_MODE, mac_mode);
1016                 udelay(100);
1017
1018                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1019                 udelay(10);
1020         }
1021
1022         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1023             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1024              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1025                 u32 base_val;
1026
1027                 base_val = tp->pci_clock_ctrl;
1028                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1029                              CLOCK_CTRL_TXCLK_DISABLE);
1030
1031                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1032                      CLOCK_CTRL_ALTCLK |
1033                      CLOCK_CTRL_PWRDOWN_PLL133);
1034                 udelay(40);
1035         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1036                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1037                 u32 newbits1, newbits2;
1038
1039                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1040                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1041                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1042                                     CLOCK_CTRL_TXCLK_DISABLE |
1043                                     CLOCK_CTRL_ALTCLK);
1044                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1045                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1046                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1047                         newbits1 = CLOCK_CTRL_625_CORE;
1048                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1049                 } else {
1050                         newbits1 = CLOCK_CTRL_ALTCLK;
1051                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1052                 }
1053
1054                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1055                 udelay(40);
1056
1057                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1058                 udelay(40);
1059
1060                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1061                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1062                         u32 newbits3;
1063
1064                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1065                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1066                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1067                                             CLOCK_CTRL_TXCLK_DISABLE |
1068                                             CLOCK_CTRL_44MHZ_CORE);
1069                         } else {
1070                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1071                         }
1072
1073                         tw32_f(TG3PCI_CLOCK_CTRL,
1074                                          tp->pci_clock_ctrl | newbits3);
1075                         udelay(40);
1076                 }
1077         }
1078
1079         tg3_frob_aux_power(tp);
1080
1081         /* Finally, set the new power state. */
1082         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1083
1084         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1085
1086         return 0;
1087 }
1088
1089 static void tg3_link_report(struct tg3 *tp)
1090 {
1091         if (!netif_carrier_ok(tp->dev)) {
1092                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1093         } else {
1094                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1095                        tp->dev->name,
1096                        (tp->link_config.active_speed == SPEED_1000 ?
1097                         1000 :
1098                         (tp->link_config.active_speed == SPEED_100 ?
1099                          100 : 10)),
1100                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1101                         "full" : "half"));
1102
1103                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1104                        "%s for RX.\n",
1105                        tp->dev->name,
1106                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1107                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1108         }
1109 }
1110
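/* 802.3x flow control resolution.  Given our advertised PAUSE/ASYM bits
 * and the link partner's, the standard truth table comes out as:
 *
 *   local PAUSE        + remote PAUSE          -> RX and TX pause
 *   local PAUSE + ASYM + remote ASYM only      -> RX pause only
 *   local ASYM only    + remote PAUSE + ASYM   -> TX pause only
 *
 * which is what the flag shuffling below implements before programming
 * MAC_RX_MODE and MAC_TX_MODE.
 */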
1111 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1112 {
1113         u32 new_tg3_flags = 0;
1114         u32 old_rx_mode = tp->rx_mode;
1115         u32 old_tx_mode = tp->tx_mode;
1116
1117         if (local_adv & ADVERTISE_PAUSE_CAP) {
1118                 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1119                         if (remote_adv & LPA_PAUSE_CAP)
1120                                 new_tg3_flags |=
1121                                         (TG3_FLAG_RX_PAUSE |
1122                                          TG3_FLAG_TX_PAUSE);
1123                         else if (remote_adv & LPA_PAUSE_ASYM)
1124                                 new_tg3_flags |=
1125                                         (TG3_FLAG_RX_PAUSE);
1126                 } else {
1127                         if (remote_adv & LPA_PAUSE_CAP)
1128                                 new_tg3_flags |=
1129                                         (TG3_FLAG_RX_PAUSE |
1130                                          TG3_FLAG_TX_PAUSE);
1131                 }
1132         } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1133                 if ((remote_adv & LPA_PAUSE_CAP) &&
1134                     (remote_adv & LPA_PAUSE_ASYM))
1135                         new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1136         }
1137
1138         tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1139         tp->tg3_flags |= new_tg3_flags;
1140
1141         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1142                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1143         else
1144                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1145
1146         if (old_rx_mode != tp->rx_mode) {
1147                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1148         }
1149         
1150         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1151                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1152         else
1153                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1154
1155         if (old_tx_mode != tp->tx_mode) {
1156                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1157         }
1158 }
1159
1160 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1161 {
1162         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1163         case MII_TG3_AUX_STAT_10HALF:
1164                 *speed = SPEED_10;
1165                 *duplex = DUPLEX_HALF;
1166                 break;
1167
1168         case MII_TG3_AUX_STAT_10FULL:
1169                 *speed = SPEED_10;
1170                 *duplex = DUPLEX_FULL;
1171                 break;
1172
1173         case MII_TG3_AUX_STAT_100HALF:
1174                 *speed = SPEED_100;
1175                 *duplex = DUPLEX_HALF;
1176                 break;
1177
1178         case MII_TG3_AUX_STAT_100FULL:
1179                 *speed = SPEED_100;
1180                 *duplex = DUPLEX_FULL;
1181                 break;
1182
1183         case MII_TG3_AUX_STAT_1000HALF:
1184                 *speed = SPEED_1000;
1185                 *duplex = DUPLEX_HALF;
1186                 break;
1187
1188         case MII_TG3_AUX_STAT_1000FULL:
1189                 *speed = SPEED_1000;
1190                 *duplex = DUPLEX_FULL;
1191                 break;
1192
1193         default:
1194                 *speed = SPEED_INVALID;
1195                 *duplex = DUPLEX_INVALID;
1196                 break;
1197         };
1198 }
1199
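/* Copper autonegotiation setup: link_config.advertising is translated
 * into MII_ADVERTISE (10/100) and MII_TG3_CTRL (1000BASE-T), the
 * gigabit bits are dropped on 10/100-only devices, and 5701 A0/B0
 * parts are forced to master as a workaround.  For forced-speed
 * configurations BMCR is programmed directly, after the link is first
 * dropped with a loopback write.
 */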
1200 static int tg3_phy_copper_begin(struct tg3 *tp)
1201 {
1202         u32 new_adv;
1203         int i;
1204
1205         if (tp->link_config.phy_is_low_power) {
1206                 /* Entering low power mode.  Disable gigabit and
1207                  * 100baseT advertisements.
1208                  */
1209                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1210
1211                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1212                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1213                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1214                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1215
1216                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1217         } else if (tp->link_config.speed == SPEED_INVALID) {
1218                 tp->link_config.advertising =
1219                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1220                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1221                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1222                          ADVERTISED_Autoneg | ADVERTISED_MII);
1223
1224                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1225                         tp->link_config.advertising &=
1226                                 ~(ADVERTISED_1000baseT_Half |
1227                                   ADVERTISED_1000baseT_Full);
1228
1229                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1230                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1231                         new_adv |= ADVERTISE_10HALF;
1232                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1233                         new_adv |= ADVERTISE_10FULL;
1234                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1235                         new_adv |= ADVERTISE_100HALF;
1236                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1237                         new_adv |= ADVERTISE_100FULL;
1238                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1239
1240                 if (tp->link_config.advertising &
1241                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1242                         new_adv = 0;
1243                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1244                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1245                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1246                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1247                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1248                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1249                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1250                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1251                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1252                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1253                 } else {
1254                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1255                 }
1256         } else {
1257                 /* Asking for a specific link mode. */
1258                 if (tp->link_config.speed == SPEED_1000) {
1259                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1260                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1261
1262                         if (tp->link_config.duplex == DUPLEX_FULL)
1263                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1264                         else
1265                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1266                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1267                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1268                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1269                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1270                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1271                 } else {
1272                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1273
1274                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1275                         if (tp->link_config.speed == SPEED_100) {
1276                                 if (tp->link_config.duplex == DUPLEX_FULL)
1277                                         new_adv |= ADVERTISE_100FULL;
1278                                 else
1279                                         new_adv |= ADVERTISE_100HALF;
1280                         } else {
1281                                 if (tp->link_config.duplex == DUPLEX_FULL)
1282                                         new_adv |= ADVERTISE_10FULL;
1283                                 else
1284                                         new_adv |= ADVERTISE_10HALF;
1285                         }
1286                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1287                 }
1288         }
1289
1290         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1291             tp->link_config.speed != SPEED_INVALID) {
1292                 u32 bmcr, orig_bmcr;
1293
1294                 tp->link_config.active_speed = tp->link_config.speed;
1295                 tp->link_config.active_duplex = tp->link_config.duplex;
1296
1297                 bmcr = 0;
1298                 switch (tp->link_config.speed) {
1299                 default:
1300                 case SPEED_10:
1301                         break;
1302
1303                 case SPEED_100:
1304                         bmcr |= BMCR_SPEED100;
1305                         break;
1306
1307                 case SPEED_1000:
1308                         bmcr |= TG3_BMCR_SPEED1000;
1309                         break;
1310                 };
1311
1312                 if (tp->link_config.duplex == DUPLEX_FULL)
1313                         bmcr |= BMCR_FULLDPLX;
1314
1315                 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1316                 if (bmcr != orig_bmcr) {
1317                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1318                         for (i = 0; i < 1500; i++) {
1319                                 u32 tmp;
1320
1321                                 udelay(10);
1322                                 tg3_readphy(tp, MII_BMSR, &tmp);
1323                                 tg3_readphy(tp, MII_BMSR, &tmp);
1324                                 if (!(tmp & BMSR_LSTATUS)) {
1325                                         udelay(40);
1326                                         break;
1327                                 }
1328                         }
1329                         tg3_writephy(tp, MII_BMCR, bmcr);
1330                         udelay(40);
1331                 }
1332         } else {
1333                 tg3_writephy(tp, MII_BMCR,
1334                              BMCR_ANENABLE | BMCR_ANRESTART);
1335         }
1336
1337         return 0;
1338 }
1339
1340 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1341 {
1342         int err;
1343
1344         /* Turn off tap power management. */
1345         /* Set Extended packet length bit */
1346         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1347
1348         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1349         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1350
1351         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1352         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1353
1354         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1355         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1356
1357         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1358         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1359
1360         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1361         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1362
1363         udelay(40);
1364
1365         return err;
1366 }
1367
1368 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1369 {
1370         u32 adv_reg, all_mask;
1371
1372         tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1373         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1374                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1375         if ((adv_reg & all_mask) != all_mask)
1376                 return 0;
1377         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1378                 u32 tg3_ctrl;
1379
1380                 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1381                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1382                             MII_TG3_CTRL_ADV_1000_FULL);
1383                 if ((tg3_ctrl & all_mask) != all_mask)
1384                         return 0;
1385         }
1386         return 1;
1387 }
1388
1389 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1390 {
1391         int current_link_up;
1392         u32 bmsr, dummy;
1393         u16 current_speed;
1394         u8 current_duplex;
1395         int i, err;
1396
1397         tw32(MAC_EVENT, 0);
1398
1399         tw32_f(MAC_STATUS,
1400              (MAC_STATUS_SYNC_CHANGED |
1401               MAC_STATUS_CFG_CHANGED |
1402               MAC_STATUS_MI_COMPLETION |
1403               MAC_STATUS_LNKSTATE_CHANGED));
1404         udelay(40);
1405
1406         tp->mi_mode = MAC_MI_MODE_BASE;
1407         tw32_f(MAC_MI_MODE, tp->mi_mode);
1408         udelay(80);
1409
1410         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1411
1412         /* Some third-party PHYs need to be reset on link going
1413          * down.
1414          */
1415         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1416              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1417              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1418             netif_carrier_ok(tp->dev)) {
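                /* BMSR latches link-down events; read it twice to get
                 * the current link state.
                 */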
1419                 tg3_readphy(tp, MII_BMSR, &bmsr);
1420                 tg3_readphy(tp, MII_BMSR, &bmsr);
1421                 if (!(bmsr & BMSR_LSTATUS))
1422                         force_reset = 1;
1423         }
1424         if (force_reset)
1425                 tg3_phy_reset(tp);
1426
1427         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1428                 tg3_readphy(tp, MII_BMSR, &bmsr);
1429                 tg3_readphy(tp, MII_BMSR, &bmsr);
1430
1431                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1432                         bmsr = 0;
1433
1434                 if (!(bmsr & BMSR_LSTATUS)) {
1435                         err = tg3_init_5401phy_dsp(tp);
1436                         if (err)
1437                                 return err;
1438
1439                         tg3_readphy(tp, MII_BMSR, &bmsr);
1440                         for (i = 0; i < 1000; i++) {
1441                                 udelay(10);
1442                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1443                                 if (bmsr & BMSR_LSTATUS) {
1444                                         udelay(40);
1445                                         break;
1446                                 }
1447                         }
1448
1449                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1450                             !(bmsr & BMSR_LSTATUS) &&
1451                             tp->link_config.active_speed == SPEED_1000) {
1452                                 err = tg3_phy_reset(tp);
1453                                 if (!err)
1454                                         err = tg3_init_5401phy_dsp(tp);
1455                                 if (err)
1456                                         return err;
1457                         }
1458                 }
1459         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1460                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1461                 /* 5701 {A0,B0} CRC bug workaround */
1462                 tg3_writephy(tp, 0x15, 0x0a75);
1463                 tg3_writephy(tp, 0x1c, 0x8c68);
1464                 tg3_writephy(tp, 0x1c, 0x8d68);
1465                 tg3_writephy(tp, 0x1c, 0x8c68);
1466         }
1467
1468         /* Clear pending interrupts... */
1469         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1470         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1471
1472         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1473                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1474         else
1475                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1476
1477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1478             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1479                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1480                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1481                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1482                 else
1483                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1484         }
1485
1486         current_link_up = 0;
1487         current_speed = SPEED_INVALID;
1488         current_duplex = DUPLEX_INVALID;
1489
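        /* Poll briefly for link before sampling the negotiated speed
         * and duplex from the PHY.
         */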
1490         bmsr = 0;
1491         for (i = 0; i < 100; i++) {
1492                 tg3_readphy(tp, MII_BMSR, &bmsr);
1493                 tg3_readphy(tp, MII_BMSR, &bmsr);
1494                 if (bmsr & BMSR_LSTATUS)
1495                         break;
1496                 udelay(40);
1497         }
1498
1499         if (bmsr & BMSR_LSTATUS) {
1500                 u32 aux_stat, bmcr;
1501
1502                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1503                 for (i = 0; i < 2000; i++) {
1504                         udelay(10);
1505                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1506                         if (aux_stat)
1507                                 break;
1508                 }
1509
1510                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1511                                              &current_speed,
1512                                              &current_duplex);
1513
1514                 bmcr = 0;
1515                 for (i = 0; i < 200; i++) {
1516                         tg3_readphy(tp, MII_BMCR, &bmcr);
1517                         tg3_readphy(tp, MII_BMCR, &bmcr);
1518                         if (bmcr && bmcr != 0x7fff)
1519                                 break;
1520                         udelay(10);
1521                 }
1522
1523                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1524                         if (bmcr & BMCR_ANENABLE) {
1525                                 current_link_up = 1;
1526
1527                                 /* Force autoneg restart if we are exiting
1528                                  * low power mode.
1529                                  */
1530                                 if (!tg3_copper_is_advertising_all(tp))
1531                                         current_link_up = 0;
1532                         } else {
1533                                 current_link_up = 0;
1534                         }
1535                 } else {
1536                         if (!(bmcr & BMCR_ANENABLE) &&
1537                             tp->link_config.speed == current_speed &&
1538                             tp->link_config.duplex == current_duplex) {
1539                                 current_link_up = 1;
1540                         } else {
1541                                 current_link_up = 0;
1542                         }
1543                 }
1544
1545                 tp->link_config.active_speed = current_speed;
1546                 tp->link_config.active_duplex = current_duplex;
1547         }
1548
1549         if (current_link_up == 1 &&
1550             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1551             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1552                 u32 local_adv, remote_adv;
1553
1554                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1555                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1556
1557                 tg3_readphy(tp, MII_LPA, &remote_adv);
1558                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1559
1560                 /* If we are not advertising full pause capability,
1561                  * something is wrong.  Bring the link down and reconfigure.
1562                  */
1563                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1564                         current_link_up = 0;
1565                 } else {
1566                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1567                 }
1568         }
1569
1570         if (current_link_up == 0) {
1571                 u32 tmp;
1572
1573                 tg3_phy_copper_begin(tp);
1574
1575                 tg3_readphy(tp, MII_BMSR, &tmp);
1576                 tg3_readphy(tp, MII_BMSR, &tmp);
1577                 if (tmp & BMSR_LSTATUS)
1578                         current_link_up = 1;
1579         }
1580
1581         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1582         if (current_link_up == 1) {
1583                 if (tp->link_config.active_speed == SPEED_100 ||
1584                     tp->link_config.active_speed == SPEED_10)
1585                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1586                 else
1587                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1588         } else
1589                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1590
1591         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1592         if (tp->link_config.active_duplex == DUPLEX_HALF)
1593                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1594
1595         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1597                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1598                     (current_link_up == 1 &&
1599                      tp->link_config.active_speed == SPEED_10))
1600                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1601         } else {
1602                 if (current_link_up == 1)
1603                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1604         }
1605
1606         /* ??? Without this setting Netgear GA302T PHY does not
1607          * ??? send/receive packets...
1608          */
1609         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1610             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1611                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1612                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1613                 udelay(80);
1614         }
1615
1616         tw32_f(MAC_MODE, tp->mac_mode);
1617         udelay(40);
1618
1619         if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
1620                 /* Polled via timer. */
1621                 tw32_f(MAC_EVENT, 0);
1622         } else {
1623                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1624         }
1625         udelay(40);
1626
1627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1628             current_link_up == 1 &&
1629             tp->link_config.active_speed == SPEED_1000 &&
1630             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1631              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1632                 udelay(120);
1633                 tw32_f(MAC_STATUS,
1634                      (MAC_STATUS_SYNC_CHANGED |
1635                       MAC_STATUS_CFG_CHANGED));
1636                 udelay(40);
1637                 tg3_write_mem(tp,
1638                               NIC_SRAM_FIRMWARE_MBOX,
1639                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1640         }
1641
1642         if (current_link_up != netif_carrier_ok(tp->dev)) {
1643                 if (current_link_up)
1644                         netif_carrier_on(tp->dev);
1645                 else
1646                         netif_carrier_off(tp->dev);
1647                 tg3_link_report(tp);
1648         }
1649
1650         return 0;
1651 }
1652
1653 struct tg3_fiber_aneginfo {
1654         int state;
1655 #define ANEG_STATE_UNKNOWN              0
1656 #define ANEG_STATE_AN_ENABLE            1
1657 #define ANEG_STATE_RESTART_INIT         2
1658 #define ANEG_STATE_RESTART              3
1659 #define ANEG_STATE_DISABLE_LINK_OK      4
1660 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1661 #define ANEG_STATE_ABILITY_DETECT       6
1662 #define ANEG_STATE_ACK_DETECT_INIT      7
1663 #define ANEG_STATE_ACK_DETECT           8
1664 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1665 #define ANEG_STATE_COMPLETE_ACK         10
1666 #define ANEG_STATE_IDLE_DETECT_INIT     11
1667 #define ANEG_STATE_IDLE_DETECT          12
1668 #define ANEG_STATE_LINK_OK              13
1669 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1670 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1671
1672         u32 flags;
1673 #define MR_AN_ENABLE            0x00000001
1674 #define MR_RESTART_AN           0x00000002
1675 #define MR_AN_COMPLETE          0x00000004
1676 #define MR_PAGE_RX              0x00000008
1677 #define MR_NP_LOADED            0x00000010
1678 #define MR_TOGGLE_TX            0x00000020
1679 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1680 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1681 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1682 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1683 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1684 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1685 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1686 #define MR_TOGGLE_RX            0x00002000
1687 #define MR_NP_RX                0x00004000
1688
1689 #define MR_LINK_OK              0x80000000
1690
1691         unsigned long link_time, cur_time;
1692
1693         u32 ability_match_cfg;
1694         int ability_match_count;
1695
1696         char ability_match, idle_match, ack_match;
1697
1698         u32 txconfig, rxconfig;
1699 #define ANEG_CFG_NP             0x00000080
1700 #define ANEG_CFG_ACK            0x00000040
1701 #define ANEG_CFG_RF2            0x00000020
1702 #define ANEG_CFG_RF1            0x00000010
1703 #define ANEG_CFG_PS2            0x00000001
1704 #define ANEG_CFG_PS1            0x00008000
1705 #define ANEG_CFG_HD             0x00004000
1706 #define ANEG_CFG_FD             0x00002000
1707 #define ANEG_CFG_INVAL          0x00001f06
1708
1709 };
1710 #define ANEG_OK         0
1711 #define ANEG_DONE       1
1712 #define ANEG_TIMER_ENAB 2
1713 #define ANEG_FAILED     -1
1714
1715 #define ANEG_STATE_SETTLE_TIME  10000
1716
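/* Software autoneg state machine for fiber links.  fiber_autoneg() below
 * steps it roughly once per microsecond; configuration words are exchanged
 * with the link partner through the MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG
 * registers.
 */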
1717 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1718                                    struct tg3_fiber_aneginfo *ap)
1719 {
1720         unsigned long delta;
1721         u32 rx_cfg_reg;
1722         int ret;
1723
1724         if (ap->state == ANEG_STATE_UNKNOWN) {
1725                 ap->rxconfig = 0;
1726                 ap->link_time = 0;
1727                 ap->cur_time = 0;
1728                 ap->ability_match_cfg = 0;
1729                 ap->ability_match_count = 0;
1730                 ap->ability_match = 0;
1731                 ap->idle_match = 0;
1732                 ap->ack_match = 0;
1733         }
1734         ap->cur_time++;
1735
1736         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1737                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1738
1739                 if (rx_cfg_reg != ap->ability_match_cfg) {
1740                         ap->ability_match_cfg = rx_cfg_reg;
1741                         ap->ability_match = 0;
1742                         ap->ability_match_count = 0;
1743                 } else {
1744                         if (++ap->ability_match_count > 1) {
1745                                 ap->ability_match = 1;
1746                                 ap->ability_match_cfg = rx_cfg_reg;
1747                         }
1748                 }
1749                 if (rx_cfg_reg & ANEG_CFG_ACK)
1750                         ap->ack_match = 1;
1751                 else
1752                         ap->ack_match = 0;
1753
1754                 ap->idle_match = 0;
1755         } else {
1756                 ap->idle_match = 1;
1757                 ap->ability_match_cfg = 0;
1758                 ap->ability_match_count = 0;
1759                 ap->ability_match = 0;
1760                 ap->ack_match = 0;
1761
1762                 rx_cfg_reg = 0;
1763         }
1764
1765         ap->rxconfig = rx_cfg_reg;
1766         ret = ANEG_OK;
1767
1768         switch(ap->state) {
1769         case ANEG_STATE_UNKNOWN:
1770                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1771                         ap->state = ANEG_STATE_AN_ENABLE;
1772
1773                 /* fallthru */
1774         case ANEG_STATE_AN_ENABLE:
1775                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1776                 if (ap->flags & MR_AN_ENABLE) {
1777                         ap->link_time = 0;
1778                         ap->cur_time = 0;
1779                         ap->ability_match_cfg = 0;
1780                         ap->ability_match_count = 0;
1781                         ap->ability_match = 0;
1782                         ap->idle_match = 0;
1783                         ap->ack_match = 0;
1784
1785                         ap->state = ANEG_STATE_RESTART_INIT;
1786                 } else {
1787                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1788                 }
1789                 break;
1790
1791         case ANEG_STATE_RESTART_INIT:
1792                 ap->link_time = ap->cur_time;
1793                 ap->flags &= ~(MR_NP_LOADED);
1794                 ap->txconfig = 0;
1795                 tw32(MAC_TX_AUTO_NEG, 0);
1796                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1797                 tw32_f(MAC_MODE, tp->mac_mode);
1798                 udelay(40);
1799
1800                 ret = ANEG_TIMER_ENAB;
1801                 ap->state = ANEG_STATE_RESTART;
1802
1803                 /* fallthru */
1804         case ANEG_STATE_RESTART:
1805                 delta = ap->cur_time - ap->link_time;
1806                 if (delta > ANEG_STATE_SETTLE_TIME) {
1807                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1808                 } else {
1809                         ret = ANEG_TIMER_ENAB;
1810                 }
1811                 break;
1812
1813         case ANEG_STATE_DISABLE_LINK_OK:
1814                 ret = ANEG_DONE;
1815                 break;
1816
1817         case ANEG_STATE_ABILITY_DETECT_INIT:
1818                 ap->flags &= ~(MR_TOGGLE_TX);
1819                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1820                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1821                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1822                 tw32_f(MAC_MODE, tp->mac_mode);
1823                 udelay(40);
1824
1825                 ap->state = ANEG_STATE_ABILITY_DETECT;
1826                 break;
1827
1828         case ANEG_STATE_ABILITY_DETECT:
1829                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1830                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1831                 }
1832                 break;
1833
1834         case ANEG_STATE_ACK_DETECT_INIT:
1835                 ap->txconfig |= ANEG_CFG_ACK;
1836                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1837                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1838                 tw32_f(MAC_MODE, tp->mac_mode);
1839                 udelay(40);
1840
1841                 ap->state = ANEG_STATE_ACK_DETECT;
1842
1843                 /* fallthru */
1844         case ANEG_STATE_ACK_DETECT:
1845                 if (ap->ack_match != 0) {
1846                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1847                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1848                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1849                         } else {
1850                                 ap->state = ANEG_STATE_AN_ENABLE;
1851                         }
1852                 } else if (ap->ability_match != 0 &&
1853                            ap->rxconfig == 0) {
1854                         ap->state = ANEG_STATE_AN_ENABLE;
1855                 }
1856                 break;
1857
1858         case ANEG_STATE_COMPLETE_ACK_INIT:
1859                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1860                         ret = ANEG_FAILED;
1861                         break;
1862                 }
1863                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1864                                MR_LP_ADV_HALF_DUPLEX |
1865                                MR_LP_ADV_SYM_PAUSE |
1866                                MR_LP_ADV_ASYM_PAUSE |
1867                                MR_LP_ADV_REMOTE_FAULT1 |
1868                                MR_LP_ADV_REMOTE_FAULT2 |
1869                                MR_LP_ADV_NEXT_PAGE |
1870                                MR_TOGGLE_RX |
1871                                MR_NP_RX);
1872                 if (ap->rxconfig & ANEG_CFG_FD)
1873                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1874                 if (ap->rxconfig & ANEG_CFG_HD)
1875                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1876                 if (ap->rxconfig & ANEG_CFG_PS1)
1877                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1878                 if (ap->rxconfig & ANEG_CFG_PS2)
1879                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1880                 if (ap->rxconfig & ANEG_CFG_RF1)
1881                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1882                 if (ap->rxconfig & ANEG_CFG_RF2)
1883                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1884                 if (ap->rxconfig & ANEG_CFG_NP)
1885                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1886
1887                 ap->link_time = ap->cur_time;
1888
1889                 ap->flags ^= (MR_TOGGLE_TX);
1890                 if (ap->rxconfig & 0x0008)
1891                         ap->flags |= MR_TOGGLE_RX;
1892                 if (ap->rxconfig & ANEG_CFG_NP)
1893                         ap->flags |= MR_NP_RX;
1894                 ap->flags |= MR_PAGE_RX;
1895
1896                 ap->state = ANEG_STATE_COMPLETE_ACK;
1897                 ret = ANEG_TIMER_ENAB;
1898                 break;
1899
1900         case ANEG_STATE_COMPLETE_ACK:
1901                 if (ap->ability_match != 0 &&
1902                     ap->rxconfig == 0) {
1903                         ap->state = ANEG_STATE_AN_ENABLE;
1904                         break;
1905                 }
1906                 delta = ap->cur_time - ap->link_time;
1907                 if (delta > ANEG_STATE_SETTLE_TIME) {
1908                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1909                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1910                         } else {
1911                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1912                                     !(ap->flags & MR_NP_RX)) {
1913                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1914                                 } else {
1915                                         ret = ANEG_FAILED;
1916                                 }
1917                         }
1918                 }
1919                 break;
1920
1921         case ANEG_STATE_IDLE_DETECT_INIT:
1922                 ap->link_time = ap->cur_time;
1923                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1924                 tw32_f(MAC_MODE, tp->mac_mode);
1925                 udelay(40);
1926
1927                 ap->state = ANEG_STATE_IDLE_DETECT;
1928                 ret = ANEG_TIMER_ENAB;
1929                 break;
1930
1931         case ANEG_STATE_IDLE_DETECT:
1932                 if (ap->ability_match != 0 &&
1933                     ap->rxconfig == 0) {
1934                         ap->state = ANEG_STATE_AN_ENABLE;
1935                         break;
1936                 }
1937                 delta = ap->cur_time - ap->link_time;
1938                 if (delta > ANEG_STATE_SETTLE_TIME) {
1939                         /* XXX another gem from the Broadcom driver :( */
1940                         ap->state = ANEG_STATE_LINK_OK;
1941                 }
1942                 break;
1943
1944         case ANEG_STATE_LINK_OK:
1945                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1946                 ret = ANEG_DONE;
1947                 break;
1948
1949         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1950                 /* ??? unimplemented */
1951                 break;
1952
1953         case ANEG_STATE_NEXT_PAGE_WAIT:
1954                 /* ??? unimplemented */
1955                 break;
1956
1957         default:
1958                 ret = ANEG_FAILED;
1959                 break;
1960         }
1961
1962         return ret;
1963 }
1964
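/* Run fiber autonegotiation.  Chips with hardware autoneg report the
 * result via SG_DIG_STATUS; otherwise the software state machine above
 * is run for up to ~195ms.  Returns 1 on success and fills *flags with
 * the link partner's ability bits.
 */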
1965 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
1966 {
1967         int res = 0;
1968
1969         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
1970                 u32 dig_status;
1971
1972                 dig_status = tr32(SG_DIG_STATUS);
1973                 *flags = 0;
1974                 if (dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
1975                         *flags |= MR_LP_ADV_ASYM_PAUSE;
1976                 if (dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
1977                         *flags |= MR_LP_ADV_SYM_PAUSE;
1978
1979                 if ((dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1980                     !(dig_status & (SG_DIG_AUTONEG_ERROR |
1981                                     SG_DIG_PARTNER_FAULT_MASK)))
1982                         res = 1;
1983         } else {
1984                 struct tg3_fiber_aneginfo aninfo;
1985                 int status = ANEG_FAILED;
1986                 unsigned int tick;
1987                 u32 tmp;
1988
1989                 tw32_f(MAC_TX_AUTO_NEG, 0);
1990
1991                 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1992                 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1993                 udelay(40);
1994
1995                 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1996                 udelay(40);
1997
1998                 memset(&aninfo, 0, sizeof(aninfo));
1999                 aninfo.flags |= MR_AN_ENABLE;
2000                 aninfo.state = ANEG_STATE_UNKNOWN;
2001                 aninfo.cur_time = 0;
2002                 tick = 0;
2003                 while (++tick < 195000) {
2004                         status = tg3_fiber_aneg_smachine(tp, &aninfo);
2005                         if (status == ANEG_DONE || status == ANEG_FAILED)
2006                                 break;
2007
2008                         udelay(1);
2009                 }
2010
2011                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2012                 tw32_f(MAC_MODE, tp->mac_mode);
2013                 udelay(40);
2014
2015                 *flags = aninfo.flags;
2016
2017                 if (status == ANEG_DONE &&
2018                     (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2019                                      MR_LP_ADV_FULL_DUPLEX)))
2020                         res = 1;
2021         }
2022
2023         return res;
2024 }
2025
2026 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2027 {
2028         u32 orig_pause_cfg;
2029         u16 orig_active_speed;
2030         u8 orig_active_duplex;
2031         int current_link_up;
2032         int i;
2033
2034         orig_pause_cfg =
2035                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2036                                   TG3_FLAG_TX_PAUSE));
2037         orig_active_speed = tp->link_config.active_speed;
2038         orig_active_duplex = tp->link_config.active_duplex;
2039
2040         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2041         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2042         tw32_f(MAC_MODE, tp->mac_mode);
2043         udelay(40);
2044
2045         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
2046                 /* Allow time for the hardware to auto-negotiate (195ms) */
2047                 unsigned int tick = 0;
2048
2049                 while (++tick < 195000) { 
2050                         if (tr32(SG_DIG_STATUS) & SG_DIG_AUTONEG_COMPLETE)
2051                                 break;
2052                         udelay(1);
2053                 }
2054                 if (tick >= 195000)
2055                         printk(KERN_INFO PFX "%s: HW autoneg failed !\n",
2056                             tp->dev->name);
2057         }
2058
2059         /* Reset when initializing for the first time, or when we have a link. */
2060         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
2061             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2062                 /* Set PLL lock range. */
2063                 tg3_writephy(tp, 0x16, 0x8007);
2064
2065                 /* SW reset */
2066                 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2067
2068                 /* Wait for reset to complete. */
2069                 /* XXX schedule_timeout() ... */
2070                 for (i = 0; i < 500; i++)
2071                         udelay(10);
2072
2073                 /* Config mode; select PMA/Ch 1 regs. */
2074                 tg3_writephy(tp, 0x10, 0x8411);
2075
2076                 /* Enable auto-lock and comdet, select txclk for tx. */
2077                 tg3_writephy(tp, 0x11, 0x0a10);
2078
2079                 tg3_writephy(tp, 0x18, 0x00a0);
2080                 tg3_writephy(tp, 0x16, 0x41ff);
2081
2082                 /* Assert and deassert POR. */
2083                 tg3_writephy(tp, 0x13, 0x0400);
2084                 udelay(40);
2085                 tg3_writephy(tp, 0x13, 0x0000);
2086
2087                 tg3_writephy(tp, 0x11, 0x0a50);
2088                 udelay(40);
2089                 tg3_writephy(tp, 0x11, 0x0a10);
2090
2091                 /* Wait for signal to stabilize */
2092                 /* XXX schedule_timeout() ... */
2093                 for (i = 0; i < 15000; i++)
2094                         udelay(10);
2095
2096                 /* Deselect the channel register so we can read the PHYID
2097                  * later.
2098                  */
2099                 tg3_writephy(tp, 0x10, 0x8011);
2100         }
2101
2102         /* Enable link change interrupt unless serdes polling.  */
2103         if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
2104                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2105         else
2106                 tw32_f(MAC_EVENT, 0);
2107         udelay(40);
2108
2109         current_link_up = 0;
2110         if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
2111                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2112                         u32 flags;
2113   
2114                         if (fiber_autoneg(tp, &flags)) {
2115                                 u32 local_adv, remote_adv;
2116
2117                                 local_adv = ADVERTISE_PAUSE_CAP;
2118                                 remote_adv = 0;
2119                                 if (flags & MR_LP_ADV_SYM_PAUSE)
2120                                         remote_adv |= LPA_PAUSE_CAP;
2121                                 if (flags & MR_LP_ADV_ASYM_PAUSE)
2122                                         remote_adv |= LPA_PAUSE_ASYM;
2123
2124                                 tg3_setup_flow_control(tp, local_adv, remote_adv);
2125
2126                                 tp->tg3_flags |=
2127                                         TG3_FLAG_GOT_SERDES_FLOWCTL;
2128                                 current_link_up = 1;
2129                         }
2130                         for (i = 0; i < 60; i++) {
2131                                 udelay(20);
2132                                 tw32_f(MAC_STATUS,
2133                                      (MAC_STATUS_SYNC_CHANGED |
2134                                       MAC_STATUS_CFG_CHANGED));
2135                                 udelay(40);
2136                                 if ((tr32(MAC_STATUS) &
2137                                      (MAC_STATUS_SYNC_CHANGED |
2138                                       MAC_STATUS_CFG_CHANGED)) == 0)
2139                                         break;
2140                         }
2141                         if (current_link_up == 0 &&
2142                             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2143                                 current_link_up = 1;
2144                         }
2145                 } else {
2146                         /* Forcing 1000FD link up. */
2147                         current_link_up = 1;
2148                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2149                 }
2150         } else
2151                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2152
2153         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2154         tw32_f(MAC_MODE, tp->mac_mode);
2155         udelay(40);
2156
2157         tp->hw_status->status =
2158                 (SD_STATUS_UPDATED |
2159                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2160
2161         for (i = 0; i < 100; i++) {
2162                 udelay(20);
2163                 tw32_f(MAC_STATUS,
2164                      (MAC_STATUS_SYNC_CHANGED |
2165                       MAC_STATUS_CFG_CHANGED));
2166                 udelay(40);
2167                 if ((tr32(MAC_STATUS) &
2168                      (MAC_STATUS_SYNC_CHANGED |
2169                       MAC_STATUS_CFG_CHANGED)) == 0)
2170                         break;
2171         }
2172
2173         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
2174                 current_link_up = 0;
2175
2176         if (current_link_up == 1) {
2177                 tp->link_config.active_speed = SPEED_1000;
2178                 tp->link_config.active_duplex = DUPLEX_FULL;
2179                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2180                                     LED_CTRL_LNKLED_OVERRIDE |
2181                                     LED_CTRL_1000MBPS_ON));
2182         } else {
2183                 tp->link_config.active_speed = SPEED_INVALID;
2184                 tp->link_config.active_duplex = DUPLEX_INVALID;
2185                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2186                                     LED_CTRL_LNKLED_OVERRIDE |
2187                                     LED_CTRL_TRAFFIC_OVERRIDE));
2188         }
2189
2190         if (current_link_up != netif_carrier_ok(tp->dev)) {
2191                 if (current_link_up)
2192                         netif_carrier_on(tp->dev);
2193                 else
2194                         netif_carrier_off(tp->dev);
2195                 tg3_link_report(tp);
2196         } else {
2197                 u32 now_pause_cfg =
2198                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2199                                          TG3_FLAG_TX_PAUSE);
2200                 if (orig_pause_cfg != now_pause_cfg ||
2201                     orig_active_speed != tp->link_config.active_speed ||
2202                     orig_active_duplex != tp->link_config.active_duplex)
2203                         tg3_link_report(tp);
2204         }
2205
2206         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
2207                 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
2208                 udelay(40);
2209                 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
2210                         tw32_f(MAC_MODE, tp->mac_mode);
2211                         udelay(40);
2212                 }
2213         }
2214
2215         return 0;
2216 }
2217
2218 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2219 {
2220         int err;
2221
2222         if (tp->phy_id == PHY_ID_SERDES) {
2223                 err = tg3_setup_fiber_phy(tp, force_reset);
2224         } else {
2225                 err = tg3_setup_copper_phy(tp, force_reset);
2226         }
2227
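        /* Half-duplex gigabit needs a longer slot time; every other
         * speed/duplex combination uses the default IPG and slot-time
         * values.
         */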
2228         if (tp->link_config.active_speed == SPEED_1000 &&
2229             tp->link_config.active_duplex == DUPLEX_HALF)
2230                 tw32(MAC_TX_LENGTHS,
2231                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2232                       (6 << TX_LENGTHS_IPG_SHIFT) |
2233                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2234         else
2235                 tw32(MAC_TX_LENGTHS,
2236                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2237                       (6 << TX_LENGTHS_IPG_SHIFT) |
2238                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2239
2240         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2241             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2242                 if (netif_carrier_ok(tp->dev)) {
2243                         tw32(HOSTCC_STAT_COAL_TICKS,
2244                              DEFAULT_STAT_COAL_TICKS);
2245                 } else {
2246                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2247                 }
2248         }
2249
2250         return err;
2251 }
2252
2253 /* Tigon3 never reports partial packet sends.  So we do not
2254  * need special logic to handle SKBs that have not had all
2255  * of their frags sent yet, like SunGEM does.
2256  */
2257 static void tg3_tx(struct tg3 *tp)
2258 {
2259         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2260         u32 sw_idx = tp->tx_cons;
2261
2262         while (sw_idx != hw_idx) {
2263                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2264                 struct sk_buff *skb = ri->skb;
2265                 int i;
2266
2267                 if (unlikely(skb == NULL))
2268                         BUG();
2269
2270                 pci_unmap_single(tp->pdev,
2271                                  pci_unmap_addr(ri, mapping),
2272                                  skb_headlen(skb),
2273                                  PCI_DMA_TODEVICE);
2274
2275                 ri->skb = NULL;
2276
2277                 sw_idx = NEXT_TX(sw_idx);
2278
2279                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2280                         if (unlikely(sw_idx == hw_idx))
2281                                 BUG();
2282
2283                         ri = &tp->tx_buffers[sw_idx];
2284                         if (unlikely(ri->skb != NULL))
2285                                 BUG();
2286
2287                         pci_unmap_page(tp->pdev,
2288                                        pci_unmap_addr(ri, mapping),
2289                                        skb_shinfo(skb)->frags[i].size,
2290                                        PCI_DMA_TODEVICE);
2291
2292                         sw_idx = NEXT_TX(sw_idx);
2293                 }
2294
2295                 dev_kfree_skb_irq(skb);
2296         }
2297
2298         tp->tx_cons = sw_idx;
2299
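        /* If the queue was stopped because the TX ring filled up, wake
         * it now that enough descriptors have been reclaimed.
         */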
2300         if (netif_queue_stopped(tp->dev) &&
2301             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2302                 netif_wake_queue(tp->dev);
2303 }
2304
2305 /* Returns size of skb allocated or < 0 on error.
2306  *
2307  * We only need to fill in the address because the other members
2308  * of the RX descriptor are invariant, see tg3_init_rings.
2309  *
2310  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2311  * posting buffers we only dirty the first cache line of the RX
2312  * descriptor (containing the address).  Whereas for the RX status
2313  * buffers the cpu only reads the last cacheline of the RX descriptor
2314  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2315  */
2316 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2317                             int src_idx, u32 dest_idx_unmasked)
2318 {
2319         struct tg3_rx_buffer_desc *desc;
2320         struct ring_info *map, *src_map;
2321         struct sk_buff *skb;
2322         dma_addr_t mapping;
2323         int skb_size, dest_idx;
2324
2325         src_map = NULL;
2326         switch (opaque_key) {
2327         case RXD_OPAQUE_RING_STD:
2328                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2329                 desc = &tp->rx_std[dest_idx];
2330                 map = &tp->rx_std_buffers[dest_idx];
2331                 if (src_idx >= 0)
2332                         src_map = &tp->rx_std_buffers[src_idx];
2333                 skb_size = RX_PKT_BUF_SZ;
2334                 break;
2335
2336         case RXD_OPAQUE_RING_JUMBO:
2337                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2338                 desc = &tp->rx_jumbo[dest_idx];
2339                 map = &tp->rx_jumbo_buffers[dest_idx];
2340                 if (src_idx >= 0)
2341                         src_map = &tp->rx_jumbo_buffers[src_idx];
2342                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2343                 break;
2344
2345         default:
2346                 return -EINVAL;
2347         }
2348
2349         /* Do not overwrite any of the map or rp information
2350          * until we are sure we can commit to a new buffer.
2351          *
2352          * Callers depend upon this behavior and assume that
2353          * we leave everything unchanged if we fail.
2354          */
2355         skb = dev_alloc_skb(skb_size);
2356         if (skb == NULL)
2357                 return -ENOMEM;
2358
2359         skb->dev = tp->dev;
2360         skb_reserve(skb, tp->rx_offset);
2361
2362         mapping = pci_map_single(tp->pdev, skb->data,
2363                                  skb_size - tp->rx_offset,
2364                                  PCI_DMA_FROMDEVICE);
2365
2366         map->skb = skb;
2367         pci_unmap_addr_set(map, mapping, mapping);
2368
2369         if (src_map != NULL)
2370                 src_map->skb = NULL;
2371
2372         desc->addr_hi = ((u64)mapping >> 32);
2373         desc->addr_lo = ((u64)mapping & 0xffffffff);
2374
2375         return skb_size;
2376 }
2377
2378 /* We only need to move over in the address because the other
2379  * members of the RX descriptor are invariant.  See notes above
2380  * tg3_alloc_rx_skb for full details.
2381  */
2382 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2383                            int src_idx, u32 dest_idx_unmasked)
2384 {
2385         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2386         struct ring_info *src_map, *dest_map;
2387         int dest_idx;
2388
2389         switch (opaque_key) {
2390         case RXD_OPAQUE_RING_STD:
2391                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2392                 dest_desc = &tp->rx_std[dest_idx];
2393                 dest_map = &tp->rx_std_buffers[dest_idx];
2394                 src_desc = &tp->rx_std[src_idx];
2395                 src_map = &tp->rx_std_buffers[src_idx];
2396                 break;
2397
2398         case RXD_OPAQUE_RING_JUMBO:
2399                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2400                 dest_desc = &tp->rx_jumbo[dest_idx];
2401                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2402                 src_desc = &tp->rx_jumbo[src_idx];
2403                 src_map = &tp->rx_jumbo_buffers[src_idx];
2404                 break;
2405
2406         default:
2407                 return;
2408         }
2409
2410         dest_map->skb = src_map->skb;
2411         pci_unmap_addr_set(dest_map, mapping,
2412                            pci_unmap_addr(src_map, mapping));
2413         dest_desc->addr_hi = src_desc->addr_hi;
2414         dest_desc->addr_lo = src_desc->addr_lo;
2415
2416         src_map->skb = NULL;
2417 }
2418
2419 #if TG3_VLAN_TAG_USED
2420 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2421 {
2422         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2423 }
2424 #endif
2425
2426 /* The RX ring scheme is composed of multiple rings which post fresh
2427  * buffers to the chip, and one special ring the chip uses to report
2428  * status back to the host.
2429  *
2430  * The special ring reports the status of received packets to the
2431  * host.  The chip does not write into the original descriptor the
2432  * RX buffer was obtained from.  The chip simply takes the original
2433  * descriptor as provided by the host, updates the status and length
2434  * field, then writes this into the next status ring entry.
2435  *
2436  * Each ring the host uses to post buffers to the chip is described
2437  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2438  * it is first placed into the on-chip ram.  When the packet's length
2439  * is known, it walks down the TG3_BDINFO entries to select the ring.
2440  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2441  * whose MAXLEN covers the new packet's length is chosen.
2442  *
2443  * The "separate ring for rx status" scheme may sound queer, but it makes
2444  * sense from a cache coherency perspective.  If only the host writes
2445  * to the buffer post rings, and only the chip writes to the rx status
2446  * rings, then cache lines never move beyond shared-modified state.
2447  * If both the host and chip were to write into the same ring, cache line
2448  * eviction could occur since both entities want it in an exclusive state.
2449  */
2450 static int tg3_rx(struct tg3 *tp, int budget)
2451 {
2452         u32 work_mask;
2453         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2454         u16 hw_idx, sw_idx;
2455         int received;
2456
2457         hw_idx = tp->hw_status->idx[0].rx_producer;
2458         /*
2459          * We need to order the read of hw_idx and the read of
2460          * the opaque cookie.
2461          */
2462         rmb();
2463         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2464         work_mask = 0;
2465         received = 0;
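        /* work_mask records which producer rings (standard/jumbo) had
         * buffers consumed, so only those are refilled and have their
         * mailboxes bumped below.
         */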
2466         while (sw_idx != hw_idx && budget > 0) {
2467                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2468                 unsigned int len;
2469                 struct sk_buff *skb;
2470                 dma_addr_t dma_addr;
2471                 u32 opaque_key, desc_idx, *post_ptr;
2472
2473                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2474                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2475                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2476                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2477                                                   mapping);
2478                         skb = tp->rx_std_buffers[desc_idx].skb;
2479                         post_ptr = &tp->rx_std_ptr;
2480                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2481                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2482                                                   mapping);
2483                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2484                         post_ptr = &tp->rx_jumbo_ptr;
2485                 }
2486                 else {
2487                         goto next_pkt_nopost;
2488                 }
2489
2490                 work_mask |= opaque_key;
2491
2492                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2493                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2494                 drop_it:
2495                         tg3_recycle_rx(tp, opaque_key,
2496                                        desc_idx, *post_ptr);
2497                 drop_it_no_recycle:
2498                         /* Other statistics are tracked by the card itself. */
2499                         tp->net_stats.rx_dropped++;
2500                         goto next_pkt;
2501                 }
2502
2503                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2504
2505                 if (len > RX_COPY_THRESHOLD) {
2506                         int skb_size;
2507
2508                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2509                                                     desc_idx, *post_ptr);
2510                         if (skb_size < 0)
2511                                 goto drop_it;
2512
2513                         pci_unmap_single(tp->pdev, dma_addr,
2514                                          skb_size - tp->rx_offset,
2515                                          PCI_DMA_FROMDEVICE);
2516
2517                         skb_put(skb, len);
2518                 } else {
2519                         struct sk_buff *copy_skb;
2520
2521                         tg3_recycle_rx(tp, opaque_key,
2522                                        desc_idx, *post_ptr);
2523
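                        /* Small packet: the original buffer has been
                         * recycled above; copy the data into a fresh skb.
                         * The 2 bytes reserved below align the IP header
                         * past the 14-byte Ethernet header.
                         */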
2524                         copy_skb = dev_alloc_skb(len + 2);
2525                         if (copy_skb == NULL)
2526                                 goto drop_it_no_recycle;
2527
2528                         copy_skb->dev = tp->dev;
2529                         skb_reserve(copy_skb, 2);
2530                         skb_put(copy_skb, len);
2531                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2532                         memcpy(copy_skb->data, skb->data, len);
2533                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2534
2535                         /* We'll reuse the original ring buffer. */
2536                         skb = copy_skb;
2537                 }
2538
2539                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2540                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2541                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2542                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2543                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2544                 else
2545                         skb->ip_summed = CHECKSUM_NONE;
2546
2547                 skb->protocol = eth_type_trans(skb, tp->dev);
2548 #if TG3_VLAN_TAG_USED
2549                 if (tp->vlgrp != NULL &&
2550                     desc->type_flags & RXD_FLAG_VLAN) {
2551                         tg3_vlan_rx(tp, skb,
2552                                     desc->err_vlan & RXD_VLAN_MASK);
2553                 } else
2554 #endif
2555                         netif_receive_skb(skb);
2556
2557                 tp->dev->last_rx = jiffies;
2558                 received++;
2559                 budget--;
2560
2561 next_pkt:
2562                 (*post_ptr)++;
2563 next_pkt_nopost:
2564                 rx_rcb_ptr++;
2565                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2566         }
2567
2568         /* ACK the status ring. */
2569         tp->rx_rcb_ptr = rx_rcb_ptr;
2570         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2571                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2572
2573         /* Refill RX ring(s). */
2574         if (work_mask & RXD_OPAQUE_RING_STD) {
2575                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2576                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2577                              sw_idx);
2578         }
2579         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2580                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2581                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2582                              sw_idx);
2583         }
2584
2585         return received;
2586 }
2587
2588 static int tg3_poll(struct net_device *netdev, int *budget)
2589 {
2590         struct tg3 *tp = netdev_priv(netdev);
2591         struct tg3_hw_status *sblk = tp->hw_status;
2592         unsigned long flags;
2593         int done;
2594
2595         spin_lock_irqsave(&tp->lock, flags);
2596
2597         /* handle link change and other phy events */
2598         if (!(tp->tg3_flags &
2599               (TG3_FLAG_USE_LINKCHG_REG |
2600                TG3_FLAG_POLL_SERDES))) {
2601                 if (sblk->status & SD_STATUS_LINK_CHG) {
2602                         sblk->status = SD_STATUS_UPDATED |
2603                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2604                         tg3_setup_phy(tp, 0);
2605                 }
2606         }
2607
2608         /* run TX completion thread */
2609         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2610                 spin_lock(&tp->tx_lock);
2611                 tg3_tx(tp);
2612                 spin_unlock(&tp->tx_lock);
2613         }
2614
2615         spin_unlock_irqrestore(&tp->lock, flags);
2616
2617         /* run RX thread, within the bounds set by NAPI.
2618          * All RX "locking" is done by ensuring outside
2619          * code synchronizes with dev->poll()
2620          */
2621         done = 1;
2622         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2623                 int orig_budget = *budget;
2624                 int work_done;
2625
2626                 if (orig_budget > netdev->quota)
2627                         orig_budget = netdev->quota;
2628
2629                 work_done = tg3_rx(tp, orig_budget);
2630
2631                 *budget -= work_done;
2632                 netdev->quota -= work_done;
2633
2634                 if (work_done >= orig_budget)
2635                         done = 0;
2636         }
2637
2638         /* if no more work, tell net stack and NIC we're done */
2639         if (done) {
2640                 spin_lock_irqsave(&tp->lock, flags);
2641                 __netif_rx_complete(netdev);
2642                 tg3_enable_ints(tp);
2643                 spin_unlock_irqrestore(&tp->lock, flags);
2644         }
2645
2646         return (done ? 0 : 1);
2647 }
2648
2649 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2650 {
2651         struct tg3_hw_status *sblk = tp->hw_status;
2652         unsigned int work_exists = 0;
2653
2654         /* check for phy events */
2655         if (!(tp->tg3_flags &
2656               (TG3_FLAG_USE_LINKCHG_REG |
2657                TG3_FLAG_POLL_SERDES))) {
2658                 if (sblk->status & SD_STATUS_LINK_CHG)
2659                         work_exists = 1;
2660         }
2661         /* check for RX/TX work to do */
2662         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2663             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2664                 work_exists = 1;
2665
2666         return work_exists;
2667 }
2668
2669 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2670 {
2671         struct net_device *dev = dev_id;
2672         struct tg3 *tp = netdev_priv(dev);
2673         struct tg3_hw_status *sblk = tp->hw_status;
2674         unsigned long flags;
2675         unsigned int handled = 1;
2676
2677         spin_lock_irqsave(&tp->lock, flags);
2678
2679         if (sblk->status & SD_STATUS_UPDATED) {
2680                 /*
2681                  * writing any value to intr-mbox-0 clears PCI INTA# and
2682                  * chip-internal interrupt pending events.
2683                  * writing non-zero to intr-mbox-0 additionally tells the
2684                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2685                  * event coalescing.
2686                  */
2687                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2688                              0x00000001);
2689                 /*
2690                  * Flush PCI write.  This also guarantees that our
2691                  * status block has been flushed to host memory.
2692                  */
2693                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2694                 sblk->status &= ~SD_STATUS_UPDATED;
2695
2696                 if (likely(tg3_has_work(dev, tp)))
2697                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2698                 else {
2699                         /* no work, shared interrupt perhaps?  re-enable
2700                          * interrupts, and flush that PCI write
2701                          */
2702                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2703                                 0x00000000);
2704                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2705                 }
2706         } else {        /* shared interrupt */
2707                 handled = 0;
2708         }
2709
2710         spin_unlock_irqrestore(&tp->lock, flags);
2711
2712         return IRQ_RETVAL(handled);
2713 }
2714
2715 static int tg3_init_hw(struct tg3 *);
2716 static int tg3_halt(struct tg3 *);
2717
2718 #ifdef CONFIG_NET_POLL_CONTROLLER
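/* netpoll entry point: service the device by calling the interrupt
 * handler directly.
 */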
2719 static void tg3_poll_controller(struct net_device *dev)
2720 {
2721         tg3_interrupt(dev->irq, dev, NULL);
2722 }
2723 #endif
2724
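/* Chip reset worker, run from process context via schedule_work() (see
 * tg3_tx_timeout below): stop the interface, halt and re-initialize the
 * hardware under the locks, then restart and, if requested, rearm the
 * driver timer.
 */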
2725 static void tg3_reset_task(void *_data)
2726 {
2727         struct tg3 *tp = _data;
2728         unsigned int restart_timer;
2729
2730         tg3_netif_stop(tp);
2731
2732         spin_lock_irq(&tp->lock);
2733         spin_lock(&tp->tx_lock);
2734
2735         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2736         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2737
2738         tg3_halt(tp);
2739         tg3_init_hw(tp);
2740
2741         spin_unlock(&tp->tx_lock);
2742         spin_unlock_irq(&tp->lock);
2743
2744         tg3_netif_start(tp);
2745
2746         if (restart_timer)
2747                 mod_timer(&tp->timer, jiffies + 1);
2748 }
2749
2750 static void tg3_tx_timeout(struct net_device *dev)
2751 {
2752         struct tg3 *tp = netdev_priv(dev);
2753
2754         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2755                dev->name);
2756
2757         schedule_work(&tp->reset_task);
2758 }
2759
2760 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2761
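/* 4GB hardware-bug workaround: copy the skb into a single fresh linear
 * buffer, point the first descriptor at the new mapping, and unmap and
 * clear the old ring entries.
 */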
2762 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2763                                        u32 guilty_entry, int guilty_len,
2764                                        u32 last_plus_one, u32 *start, u32 mss)
2765 {
2766         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2767         dma_addr_t new_addr;
2768         u32 entry = *start;
2769         int i;
2770
2771         if (!new_skb) {
2772                 dev_kfree_skb(skb);
2773                 return -1;
2774         }
2775
2776         /* New SKB is guaranteed to be linear. */
2777         entry = *start;
2778         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2779                                   PCI_DMA_TODEVICE);
2780         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2781                     (skb->ip_summed == CHECKSUM_HW) ?
2782                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2783         *start = NEXT_TX(entry);
2784
2785         /* Now clean up the sw ring entries. */
2786         i = 0;
2787         while (entry != last_plus_one) {
2788                 int len;
2789
2790                 if (i == 0)
2791                         len = skb_headlen(skb);
2792                 else
2793                         len = skb_shinfo(skb)->frags[i-1].size;
2794                 pci_unmap_single(tp->pdev,
2795                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2796                                  len, PCI_DMA_TODEVICE);
2797                 if (i == 0) {
2798                         tp->tx_buffers[entry].skb = new_skb;
2799                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2800                 } else {
2801                         tp->tx_buffers[entry].skb = NULL;
2802                 }
2803                 entry = NEXT_TX(entry);
2804         }
2805
2806         dev_kfree_skb(skb);
2807
2808         return 0;
2809 }
2810
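/* Fill one TX descriptor.  The low bit of mss_and_is_end flags the last
 * descriptor of a packet (TXD_FLAG_END); the remaining bits carry the TSO
 * MSS.  With TG3_FLAG_HOST_TXDS the descriptor lives in the host tx_ring,
 * otherwise it is written directly into NIC SRAM with writel(), caching
 * prev_vlan_tag to skip redundant PIO writes.
 */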
2811 static void tg3_set_txd(struct tg3 *tp, int entry,
2812                         dma_addr_t mapping, int len, u32 flags,
2813                         u32 mss_and_is_end)
2814 {
2815         int is_end = (mss_and_is_end & 0x1);
2816         u32 mss = (mss_and_is_end >> 1);
2817         u32 vlan_tag = 0;
2818
2819         if (is_end)
2820                 flags |= TXD_FLAG_END;
2821         if (flags & TXD_FLAG_VLAN) {
2822                 vlan_tag = flags >> 16;
2823                 flags &= 0xffff;
2824         }
2825         vlan_tag |= (mss << TXD_MSS_SHIFT);
2826         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2827                 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2828
2829                 txd->addr_hi = ((u64) mapping >> 32);
2830                 txd->addr_lo = ((u64) mapping & 0xffffffff);
2831                 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2832                 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2833         } else {
2834                 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2835                 unsigned long txd;
2836
2837                 txd = (tp->regs +
2838                        NIC_SRAM_WIN_BASE +
2839                        NIC_SRAM_TX_BUFFER_DESC);
2840                 txd += (entry * TXD_SIZE);
2841
2842                 /* Save some PIOs */
2843                 if (sizeof(dma_addr_t) != sizeof(u32))
2844                         writel(((u64) mapping >> 32),
2845                                txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2846
2847                 writel(((u64) mapping & 0xffffffff),
2848                        txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2849                 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2850                 if (txr->prev_vlan_tag != vlan_tag) {
2851                         writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2852                         txr->prev_vlan_tag = vlan_tag;
2853                 }
2854         }
2855 }
2856
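/* Return true when a DMA mapping starts just below a 4GB address boundary
 * and the buffer (plus a small pad) would cross it, i.e. base + len + 8
 * wraps the low 32 bits.  Such mappings hit a Tigon3 DMA bug and are
 * rerouted through tigon3_4gb_hwbug_workaround().
 */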
2857 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2858 {
2859         u32 base = (u32) mapping & 0xffffffff;
2860
2861         return ((base > 0xffffdcc0) &&
2862                 (base + len + 8 < base));
2863 }
2864
2865 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2866 {
2867         struct tg3 *tp = netdev_priv(dev);
2868         dma_addr_t mapping;
2869         unsigned int i;
2870         u32 len, entry, base_flags, mss;
2871         int would_hit_hwbug;
2872         unsigned long flags;
2873
2874         len = skb_headlen(skb);
2875
2876         /* No BH disabling for tx_lock here.  We are running in BH disabled
2877          * context and TX reclaim runs via tp->poll inside of a software
2878          * interrupt.  Rejoice!
2879          *
2880          * Actually, things are not so simple.  If we are to take a hw
2881          * IRQ here, we can deadlock; consider:
2882          *
2883          *       CPU1           CPU2
2884          *   tg3_start_xmit
2885          *   take tp->tx_lock
2886          *                      tg3_timer
2887          *                      take tp->lock
2888          *   tg3_interrupt
2889          *   spin on tp->lock
2890          *                      spin on tp->tx_lock
2891          *
2892          * So we really do need to disable interrupts when taking
2893          * tx_lock here.
2894          */
2895         spin_lock_irqsave(&tp->tx_lock, flags);
2896
2897         /* This is a hard error, log it. */
2898         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2899                 netif_stop_queue(dev);
2900                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2901                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2902                        dev->name);
2903                 return 1;
2904         }
2905
2906         entry = tp->tx_prod;
2907         base_flags = 0;
2908         if (skb->ip_summed == CHECKSUM_HW)
2909                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2910 #if TG3_TSO_SUPPORT != 0
2911         mss = 0;
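        /* For TSO, seed the per-segment header fields: zero the IP checksum,
         * set tot_len to one segment's worth, and leave a pseudo-header
         * checksum in the TCP header for the hardware to complete.  5705-class
         * parts want the IP/TCP option length folded into the mss field,
         * other chips take it in base_flags.
         */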
2912         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2913             (mss = skb_shinfo(skb)->tso_size) != 0) {
2914                 int tcp_opt_len, ip_tcp_len;
2915
2916                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2917                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2918
2919                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2920                                TXD_FLAG_CPU_POST_DMA);
2921
2922                 skb->nh.iph->check = 0;
2923                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
2924                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2925                                                       skb->nh.iph->daddr,
2926                                                       0, IPPROTO_TCP, 0);
2927
2928                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2929                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2930                                 int tsflags;
2931
2932                                 tsflags = ((skb->nh.iph->ihl - 5) +
2933                                            (tcp_opt_len >> 2));
2934                                 mss |= (tsflags << 11);
2935                         }
2936                 } else {
2937                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2938                                 int tsflags;
2939
2940                                 tsflags = ((skb->nh.iph->ihl - 5) +
2941                                            (tcp_opt_len >> 2));
2942                                 base_flags |= tsflags << 12;
2943                         }
2944                 }
2945         }
2946 #else
2947         mss = 0;
2948 #endif
2949 #if TG3_VLAN_TAG_USED
2950         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2951                 base_flags |= (TXD_FLAG_VLAN |
2952                                (vlan_tx_tag_get(skb) << 16));
2953 #endif
2954
2955         /* Queue skb data, a.k.a. the main skb fragment. */
2956         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2957
2958         tp->tx_buffers[entry].skb = skb;
2959         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2960
2961         would_hit_hwbug = 0;
2962
2963         if (tg3_4g_overflow_test(mapping, len))
2964                 would_hit_hwbug = entry + 1;
2965
2966         tg3_set_txd(tp, entry, mapping, len, base_flags,
2967                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2968
2969         entry = NEXT_TX(entry);
2970
2971         /* Now loop through additional data fragments, and queue them. */
2972         if (skb_shinfo(skb)->nr_frags > 0) {
2973                 unsigned int i, last;
2974
2975                 last = skb_shinfo(skb)->nr_frags - 1;
2976                 for (i = 0; i <= last; i++) {
2977                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2978
2979                         len = frag->size;
2980                         mapping = pci_map_page(tp->pdev,
2981                                                frag->page,
2982                                                frag->page_offset,
2983                                                len, PCI_DMA_TODEVICE);
2984
2985                         tp->tx_buffers[entry].skb = NULL;
2986                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2987
2988                         if (tg3_4g_overflow_test(mapping, len)) {
2989                                 /* Only one should match. */
2990                                 if (would_hit_hwbug)
2991                                         BUG();
2992                                 would_hit_hwbug = entry + 1;
2993                         }
2994
2995                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
2996                                 tg3_set_txd(tp, entry, mapping, len,
2997                                             base_flags, (i == last)|(mss << 1));
2998                         else
2999                                 tg3_set_txd(tp, entry, mapping, len,
3000                                             base_flags, (i == last));
3001
3002                         entry = NEXT_TX(entry);
3003                 }
3004         }
3005
3006         if (would_hit_hwbug) {
3007                 u32 last_plus_one = entry;
3008                 u32 start;
3009                 unsigned int len = 0;
3010
3011                 would_hit_hwbug -= 1;
3012                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3013                 entry &= (TG3_TX_RING_SIZE - 1);
3014                 start = entry;
3015                 i = 0;
3016                 while (entry != last_plus_one) {
3017                         if (i == 0)
3018                                 len = skb_headlen(skb);
3019                         else
3020                                 len = skb_shinfo(skb)->frags[i-1].size;
3021
3022                         if (entry == would_hit_hwbug)
3023                                 break;
3024
3025                         i++;
3026                         entry = NEXT_TX(entry);
3027
3028                 }
3029
3030                 /* If the workaround fails due to memory/mapping
3031                  * failure, silently drop this packet.
3032                  */
3033                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3034                                                 entry, len,
3035                                                 last_plus_one,
3036                                                 &start, mss))
3037                         goto out_unlock;
3038
3039                 entry = start;
3040         }
3041
3042         /* Packets are ready, update Tx producer idx, both locally and on card. */
3043         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3044                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
3045                               TG3_64BIT_REG_LOW), entry);
3046         } else {
3047                 /* First, make sure tg3 sees the last descriptor fully
3048                  * in SRAM.
3049                  */
3050                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3051                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
3052
3053                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3054                               TG3_64BIT_REG_LOW), entry);
3055         }
3056
3057         tp->tx_prod = entry;
3058         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3059                 netif_stop_queue(dev);
3060
3061 out_unlock:
3062         spin_unlock_irqrestore(&tp->tx_lock, flags);
3063
3064         dev->trans_start = jiffies;
3065
3066         return 0;
3067 }
3068
3069 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3070                                int new_mtu)
3071 {
3072         dev->mtu = new_mtu;
3073
3074         if (new_mtu > ETH_DATA_LEN)
3075                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3076         else
3077                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3078 }
3079
3080 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3081 {
3082         struct tg3 *tp = netdev_priv(dev);
3083
3084         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3085                 return -EINVAL;
3086
3087         if (!netif_running(dev)) {
3088                 /* We'll just catch it later when the
3089                  * device is brought up.
3090                  */
3091                 tg3_set_mtu(dev, tp, new_mtu);
3092                 return 0;
3093         }
3094
3095         tg3_netif_stop(tp);
3096         spin_lock_irq(&tp->lock);
3097         spin_lock(&tp->tx_lock);
3098
3099         tg3_halt(tp);
3100
3101         tg3_set_mtu(dev, tp, new_mtu);
3102
3103         tg3_init_hw(tp);
3104
3105         spin_unlock(&tp->tx_lock);
3106         spin_unlock_irq(&tp->lock);
3107         tg3_netif_start(tp);
3108
3109         return 0;
3110 }
3111
3112 /* Free up pending packets in all rx/tx rings.
3113  *
3114  * The chip has been shut down and the driver detached from
3115  * the networking stack, so no interrupts or new tx packets will
3116  * end up in the driver.  tp->{tx,}lock is not held and we are not
3117  * in an interrupt context and thus may sleep.
3118  */
3119 static void tg3_free_rings(struct tg3 *tp)
3120 {
3121         struct ring_info *rxp;
3122         int i;
3123
3124         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3125                 rxp = &tp->rx_std_buffers[i];
3126
3127                 if (rxp->skb == NULL)
3128                         continue;
3129                 pci_unmap_single(tp->pdev,
3130                                  pci_unmap_addr(rxp, mapping),
3131                                  RX_PKT_BUF_SZ - tp->rx_offset,
3132                                  PCI_DMA_FROMDEVICE);
3133                 dev_kfree_skb_any(rxp->skb);
3134                 rxp->skb = NULL;
3135         }
3136
3137         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3138                 rxp = &tp->rx_jumbo_buffers[i];
3139
3140                 if (rxp->skb == NULL)
3141                         continue;
3142                 pci_unmap_single(tp->pdev,
3143                                  pci_unmap_addr(rxp, mapping),
3144                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3145                                  PCI_DMA_FROMDEVICE);
3146                 dev_kfree_skb_any(rxp->skb);
3147                 rxp->skb = NULL;
3148         }
3149
3150         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3151                 struct tx_ring_info *txp;
3152                 struct sk_buff *skb;
3153                 int j;
3154
3155                 txp = &tp->tx_buffers[i];
3156                 skb = txp->skb;
3157
3158                 if (skb == NULL) {
3159                         i++;
3160                         continue;
3161                 }
3162
3163                 pci_unmap_single(tp->pdev,
3164                                  pci_unmap_addr(txp, mapping),
3165                                  skb_headlen(skb),
3166                                  PCI_DMA_TODEVICE);
3167                 txp->skb = NULL;
3168
3169                 i++;
3170
3171                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3172                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3173                         pci_unmap_page(tp->pdev,
3174                                        pci_unmap_addr(txp, mapping),
3175                                        skb_shinfo(skb)->frags[j].size,
3176                                        PCI_DMA_TODEVICE);
3177                         i++;
3178                 }
3179
3180                 dev_kfree_skb_any(skb);
3181         }
3182 }
3183
3184 /* Initialize tx/rx rings for packet processing.
3185  *
3186  * The chip has been shut down and the driver detached from
3187  * the networking stack, so no interrupts or new tx packets will
3188  * end up in the driver.  tp->{tx,}lock are held and thus
3189  * we may not sleep.
3190  */
3191 static void tg3_init_rings(struct tg3 *tp)
3192 {
3193         unsigned long start, end;
3194         u32 i;
3195
3196         /* Free up all the SKBs. */
3197         tg3_free_rings(tp);
3198
3199         /* Zero out all descriptors. */
3200         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3201         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3202         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3203
3204         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3205                 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3206         } else {
3207                 start = (tp->regs +
3208                          NIC_SRAM_WIN_BASE +
3209                          NIC_SRAM_TX_BUFFER_DESC);
3210                 end = start + TG3_TX_RING_BYTES;
3211                 while (start < end) {
3212                         writel(0, start);
3213                         start += 4;
3214                 }
3215                 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3216                         tp->tx_buffers[i].prev_vlan_tag = 0;
3217         }
3218
3219         /* Initialize invariants of the rings; we only set this
3220          * stuff once.  This works because the card does not
3221          * write into the rx buffer posting rings.
3222          */
3223         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3224                 struct tg3_rx_buffer_desc *rxd;
3225
3226                 rxd = &tp->rx_std[i];
3227                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3228                         << RXD_LEN_SHIFT;
3229                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3230                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3231                                (i << RXD_OPAQUE_INDEX_SHIFT));
3232         }
3233
3234         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3235                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3236                         struct tg3_rx_buffer_desc *rxd;
3237
3238                         rxd = &tp->rx_jumbo[i];
3239                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3240                                 << RXD_LEN_SHIFT;
3241                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3242                                 RXD_FLAG_JUMBO;
3243                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3244                                (i << RXD_OPAQUE_INDEX_SHIFT));
3245                 }
3246         }
3247
3248         /* Now allocate fresh SKBs for each rx ring. */
3249         for (i = 0; i < tp->rx_pending; i++) {
3250                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3251                                      -1, i) < 0)
3252                         break;
3253         }
3254
3255         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3256                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3257                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3258                                              -1, i) < 0)
3259                                 break;
3260                 }
3261         }
3262 }
3263
3264 /*
3265  * Must not be invoked with interrupt sources disabled and
3266  * the hardware shut down.
3267  */
3268 static void tg3_free_consistent(struct tg3 *tp)
3269 {
3270         if (tp->rx_std_buffers) {
3271                 kfree(tp->rx_std_buffers);
3272                 tp->rx_std_buffers = NULL;
3273         }
3274         if (tp->rx_std) {
3275                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3276                                     tp->rx_std, tp->rx_std_mapping);
3277                 tp->rx_std = NULL;
3278         }
3279         if (tp->rx_jumbo) {
3280                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3281                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3282                 tp->rx_jumbo = NULL;
3283         }
3284         if (tp->rx_rcb) {
3285                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3286                                     tp->rx_rcb, tp->rx_rcb_mapping);
3287                 tp->rx_rcb = NULL;
3288         }
3289         if (tp->tx_ring) {
3290                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3291                         tp->tx_ring, tp->tx_desc_mapping);
3292                 tp->tx_ring = NULL;
3293         }
3294         if (tp->hw_status) {
3295                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3296                                     tp->hw_status, tp->status_mapping);
3297                 tp->hw_status = NULL;
3298         }
3299         if (tp->hw_stats) {
3300                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3301                                     tp->hw_stats, tp->stats_mapping);
3302                 tp->hw_stats = NULL;
3303         }
3304 }
3305
3306 /*
3307  * Must not be invoked with interrupt sources disabled and
3308  * the hardware shut down.  Can sleep.
3309  */
3310 static int tg3_alloc_consistent(struct tg3 *tp)
3311 {
3312         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3313                                       (TG3_RX_RING_SIZE +
3314                                        TG3_RX_JUMBO_RING_SIZE)) +
3315                                      (sizeof(struct tx_ring_info) *
3316                                       TG3_TX_RING_SIZE),
3317                                      GFP_KERNEL);
3318         if (!tp->rx_std_buffers)
3319                 return -ENOMEM;
3320
3321         memset(tp->rx_std_buffers, 0,
3322                (sizeof(struct ring_info) *
3323                 (TG3_RX_RING_SIZE +
3324                  TG3_RX_JUMBO_RING_SIZE)) +
3325                (sizeof(struct tx_ring_info) *
3326                 TG3_TX_RING_SIZE));
3327
3328         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3329         tp->tx_buffers = (struct tx_ring_info *)
3330                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3331
3332         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3333                                           &tp->rx_std_mapping);
3334         if (!tp->rx_std)
3335                 goto err_out;
3336
3337         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3338                                             &tp->rx_jumbo_mapping);
3339
3340         if (!tp->rx_jumbo)
3341                 goto err_out;
3342
3343         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3344                                           &tp->rx_rcb_mapping);
3345         if (!tp->rx_rcb)
3346                 goto err_out;
3347
3348         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3349                 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3350                                                    &tp->tx_desc_mapping);
3351                 if (!tp->tx_ring)
3352                         goto err_out;
3353         } else {
3354                 tp->tx_ring = NULL;
3355                 tp->tx_desc_mapping = 0;
3356         }
3357
3358         tp->hw_status = pci_alloc_consistent(tp->pdev,
3359                                              TG3_HW_STATUS_SIZE,
3360                                              &tp->status_mapping);
3361         if (!tp->hw_status)
3362                 goto err_out;
3363
3364         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3365                                             sizeof(struct tg3_hw_stats),
3366                                             &tp->stats_mapping);
3367         if (!tp->hw_stats)
3368                 goto err_out;
3369
3370         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3371         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3372
3373         return 0;
3374
3375 err_out:
3376         tg3_free_consistent(tp);
3377         return -ENOMEM;
3378 }
3379
3380 #define MAX_WAIT_CNT 1000
3381
3382 /* To stop a block, clear the enable bit and poll till it
3383  * clears.  tp->lock is held.
3384  */
3385 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3386 {
3387         unsigned int i;
3388         u32 val;
3389
3390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3391             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3392                 switch (ofs) {
3393                 case RCVLSC_MODE:
3394                 case DMAC_MODE:
3395                 case MBFREE_MODE:
3396                 case BUFMGR_MODE:
3397                 case MEMARB_MODE:
3398                         /* We can't enable/disable these bits on the
3399                          * 5705/5750, so just report success.
3400                          */
3401                         return 0;
3402
3403                 default:
3404                         break;
3405                 };
3406         }
3407
3408         val = tr32(ofs);
3409         val &= ~enable_bit;
3410         tw32_f(ofs, val);
3411
3412         for (i = 0; i < MAX_WAIT_CNT; i++) {
3413                 udelay(100);
3414                 val = tr32(ofs);
3415                 if ((val & enable_bit) == 0)
3416                         break;
3417         }
3418
3419         if (i == MAX_WAIT_CNT) {
3420                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3421                        "ofs=%lx enable_bit=%x\n",
3422                        ofs, enable_bit);
3423                 return -ENODEV;
3424         }
3425
3426         return 0;
3427 }
3428
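/* Quiesce the chip: disable interrupts and the RX MAC, then walk the
 * receive, send and DMA blocks clearing their ENABLE bits via
 * tg3_stop_block(), finishing with the host coalescing, buffer manager
 * and memory arbiter.  Errors are accumulated and returned.
 */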
3429 /* tp->lock is held. */
3430 static int tg3_abort_hw(struct tg3 *tp)
3431 {
3432         int i, err;
3433
3434         tg3_disable_ints(tp);
3435
3436         tp->rx_mode &= ~RX_MODE_ENABLE;
3437         tw32_f(MAC_RX_MODE, tp->rx_mode);
3438         udelay(10);
3439
3440         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3441         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3442         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3443         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3444         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3445         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3446
3447         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3448         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3449         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3450         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3451         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3452         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3453         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3454         if (err)
3455                 goto out;
3456
3457         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3458         tw32_f(MAC_MODE, tp->mac_mode);
3459         udelay(40);
3460
3461         tp->tx_mode &= ~TX_MODE_ENABLE;
3462         tw32_f(MAC_TX_MODE, tp->tx_mode);
3463
3464         for (i = 0; i < MAX_WAIT_CNT; i++) {
3465                 udelay(100);
3466                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3467                         break;
3468         }
3469         if (i >= MAX_WAIT_CNT) {
3470                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3471                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3472                        tp->dev->name, tr32(MAC_TX_MODE));
3473                 return -ENODEV;
3474         }
3475
3476         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3477         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3478         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3479
3480         tw32(FTQ_RESET, 0xffffffff);
3481         tw32(FTQ_RESET, 0x00000000);
3482
3483         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3484         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3485         if (err)
3486                 goto out;
3487
3488         if (tp->hw_status)
3489                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3490         if (tp->hw_stats)
3491                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3492
3493 out:
3494         return err;
3495 }
3496
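/* Grab the NVRAM software arbitration semaphore (SWARB_REQ_SET1) and poll
 * for the grant bit, giving up after roughly 160ms.  A no-op on parts with
 * no NVRAM interface.
 */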
3497 /* tp->lock is held. */
3498 static int tg3_nvram_lock(struct tg3 *tp)
3499 {
3500         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3501                 int i;
3502
3503                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3504                 for (i = 0; i < 8000; i++) {
3505                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3506                                 break;
3507                         udelay(20);
3508                 }
3509                 if (i == 8000)
3510                         return -ENODEV;
3511         }
3512         return 0;
3513 }
3514
3515 /* tp->lock is held. */
3516 static void tg3_nvram_unlock(struct tg3 *tp)
3517 {
3518         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3519                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3520 }
3521
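/* The tg3_write_sig_* helpers below tell on-chip management (ASF) firmware,
 * via NIC SRAM mailboxes, which driver state transition is in progress
 * around a reset, using either the newer handshake or the legacy scheme
 * depending on the chip.
 */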
3522 /* tp->lock is held. */
3523 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3524 {
3525         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3526                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3527
3528         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3529                 switch (kind) {
3530                 case RESET_KIND_INIT:
3531                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3532                                       DRV_STATE_START);
3533                         break;
3534
3535                 case RESET_KIND_SHUTDOWN:
3536                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3537                                       DRV_STATE_UNLOAD);
3538                         break;
3539
3540                 case RESET_KIND_SUSPEND:
3541                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3542                                       DRV_STATE_SUSPEND);
3543                         break;
3544
3545                 default:
3546                         break;
3547                 };
3548         }
3549 }
3550
3551 /* tp->lock is held. */
3552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3553 {
3554         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3555                 switch (kind) {
3556                 case RESET_KIND_INIT:
3557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3558                                       DRV_STATE_START_DONE);
3559                         break;
3560
3561                 case RESET_KIND_SHUTDOWN:
3562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3563                                       DRV_STATE_UNLOAD_DONE);
3564                         break;
3565
3566                 default:
3567                         break;
3568                 };
3569         }
3570 }
3571
3572 /* tp->lock is held. */
3573 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3574 {
3575         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3576                 switch (kind) {
3577                 case RESET_KIND_INIT:
3578                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3579                                       DRV_STATE_START);
3580                         break;
3581
3582                 case RESET_KIND_SHUTDOWN:
3583                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3584                                       DRV_STATE_UNLOAD);
3585                         break;
3586
3587                 case RESET_KIND_SUSPEND:
3588                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3589                                       DRV_STATE_SUSPEND);
3590                         break;
3591
3592                 default:
3593                         break;
3594                 };
3595         }
3596 }
3597
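/* Issue a full core-clock reset through GRC_MISC_CFG.  MMIO is unusable
 * while the core restarts, so the post-reset pokes go through PCI config
 * space; afterwards we restore the host control registers, wait for the
 * bootcode to post the inverted magic value in the firmware mailbox, and
 * reprobe the ASF state.
 */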
3598 /* tp->lock is held. */
3599 static int tg3_chip_reset(struct tg3 *tp)
3600 {
3601         u32 val;
3602         u32 flags_save;
3603         int i;
3604
3605         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704))
3606                 tg3_nvram_lock(tp);
3607
3608         /*
3609          * We must avoid the readl() that normally takes place.
3610          * It locks machines, causes machine checks, and other
3611          * fun things.  So, temporarily disable the 5701
3612          * hardware workaround, while we do the reset.
3613          */
3614         flags_save = tp->tg3_flags;
3615         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3616
3617         /* do the reset */
3618         val = GRC_MISC_CFG_CORECLK_RESET;
3619
3620         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3621                 if (tr32(0x7e2c) == 0x60) {
3622                         tw32(0x7e2c, 0x20);
3623                 }
3624                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3625                         tw32(GRC_MISC_CFG, (1 << 29));
3626                         val |= (1 << 29);
3627                 }
3628         }
3629
3630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3632                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3633         tw32(GRC_MISC_CFG, val);
3634
3635         /* restore 5701 hardware bug workaround flag */
3636         tp->tg3_flags = flags_save;
3637
3638         /* Unfortunately, we have to delay before the PCI read back.
3639  * Some 575X chips will not even respond to a PCI cfg access
3640          * when the reset command is given to the chip.
3641          *
3642          * How do these hardware designers expect things to work
3643          * properly if the PCI write is posted for a long period
3644          * of time?  It is always necessary to have some method by
3645          * which a register read back can occur to push the write
3646          * out which does the reset.
3647          *
3648          * For most tg3 variants the trick below was working.
3649          * Ho hum...
3650          */
3651         udelay(120);
3652
3653         /* Flush PCI posted writes.  The normal MMIO registers
3654          * are inaccessible at this time so this is the only
3655  * way to do this reliably (actually, this is no longer
3656          * the case, see above).  I tried to use indirect
3657          * register read/write but this upset some 5701 variants.
3658          */
3659         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3660
3661         udelay(120);
3662
3663         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3664                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3665                         int i;
3666                         u32 cfg_val;
3667
3668                         /* Wait for link training to complete.  */
3669                         for (i = 0; i < 5000; i++)
3670                                 udelay(100);
3671
3672                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3673                         pci_write_config_dword(tp->pdev, 0xc4,
3674                                                cfg_val | (1 << 15));
3675                 }
3676                 /* Set PCIE max payload size and clear error status.  */
3677                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3678         }
3679
3680         /* Re-enable indirect register accesses. */
3681         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3682                                tp->misc_host_ctrl);
3683
3684         /* Set MAX PCI retry to zero. */
3685         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3686         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3687             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3688                 val |= PCISTATE_RETRY_SAME_DMA;
3689         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3690
3691         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3692
3693         /* Make sure PCI-X relaxed ordering bit is clear. */
3694         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3695         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3696         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3697
3698         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3699
3700         tw32(GRC_MODE, tp->grc_mode);
3701
3702         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3703                 u32 val = tr32(0xc4);
3704
3705                 tw32(0xc4, val | (1 << 15));
3706         }
3707
3708         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3710                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3711                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3712                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3713                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3714         }
3715
3716         if (tp->phy_id == PHY_ID_SERDES) {
3717                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3718                 tw32_f(MAC_MODE, tp->mac_mode);
3719         } else
3720                 tw32_f(MAC_MODE, 0);
3721         udelay(40);
3722
3723         /* Wait for firmware initialization to complete. */
3724         for (i = 0; i < 100000; i++) {
3725                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3726                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3727                         break;
3728                 udelay(10);
3729         }
3730         if (i >= 100000 &&
3731             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3732                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3733                        "firmware will not restart magic=%08x\n",
3734                        tp->dev->name, val);
3735                 return -ENODEV;
3736         }
3737
3738         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3739             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3740                 u32 val = tr32(0x7c00);
3741
3742                 tw32(0x7c00, val | (1 << 25));
3743         }
3744
3745         /* Reprobe ASF enable state.  */
3746         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3747         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3748         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3749         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3750                 u32 nic_cfg;
3751
3752                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3753                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3754                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3755                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3756                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3757                 }
3758         }
3759
3760         return 0;
3761 }
3762
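/* Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the firmware
 * command mailbox, ring the RX CPU event bit, and briefly wait for the
 * firmware to acknowledge by clearing it.
 */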
3763 /* tp->lock is held. */
3764 static void tg3_stop_fw(struct tg3 *tp)
3765 {
3766         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3767                 u32 val;
3768                 int i;
3769
3770                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3771                 val = tr32(GRC_RX_CPU_EVENT);
3772                 val |= (1 << 14);
3773                 tw32(GRC_RX_CPU_EVENT, val);
3774
3775                 /* Wait for RX cpu to ACK the event.  */
3776                 for (i = 0; i < 100; i++) {
3777                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3778                                 break;
3779                         udelay(1);
3780                 }
3781         }
3782 }
3783
3784 /* tp->lock is held. */
3785 static int tg3_halt(struct tg3 *tp)
3786 {
3787         int err;
3788
3789         tg3_stop_fw(tp);
3790
3791         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3792
3793         tg3_abort_hw(tp);
3794         err = tg3_chip_reset(tp);
3795
3796         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3797         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3798
3799         if (err)
3800                 return err;
3801
3802         return 0;
3803 }
3804
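/* 5701 A0 workaround firmware: a small MIPS image (text/rodata below)
 * loaded into the RX CPU scratch memory by tg3_load_5701_a0_firmware_fix().
 */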
3805 #define TG3_FW_RELEASE_MAJOR    0x0
3806 #define TG3_FW_RELASE_MINOR     0x0
3807 #define TG3_FW_RELEASE_FIX      0x0
3808 #define TG3_FW_START_ADDR       0x08000000
3809 #define TG3_FW_TEXT_ADDR        0x08000000
3810 #define TG3_FW_TEXT_LEN         0x9c0
3811 #define TG3_FW_RODATA_ADDR      0x080009c0
3812 #define TG3_FW_RODATA_LEN       0x60
3813 #define TG3_FW_DATA_ADDR        0x08000a40
3814 #define TG3_FW_DATA_LEN         0x20
3815 #define TG3_FW_SBSS_ADDR        0x08000a60
3816 #define TG3_FW_SBSS_LEN         0xc
3817 #define TG3_FW_BSS_ADDR         0x08000a70
3818 #define TG3_FW_BSS_LEN          0x10
3819
3820 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3821         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3822         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3823         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3824         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3825         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3826         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3827         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3828         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3829         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3830         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3831         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3832         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3833         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3834         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3835         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3836         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3837         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3838         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3839         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3840         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3841         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3842         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3843         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3844         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3845         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3846         0, 0, 0, 0, 0, 0,
3847         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3848         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3849         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3850         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3851         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3852         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3853         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3854         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3855         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3856         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3857         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3858         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3859         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3860         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3861         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3862         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3863         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3864         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3865         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3866         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3867         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3868         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3869         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3870         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3871         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3872         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3873         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3874         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3875         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3876         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3877         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3878         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3879         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3880         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3881         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3882         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3883         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3884         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3885         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3886         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3887         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3888         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3889         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3890         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3891         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3892         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3893         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3894         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3895         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3896         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3897         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3898         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3899         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3900         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3901         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3902         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3903         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3904         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3905         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3906         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3907         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3908         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3909         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
3910         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
3911         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
3912 };
3913
3914 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
3915         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
3916         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
3917         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
3918         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
3919         0x00000000
3920 };
3921
3922 #if 0 /* All zeros, don't eat up space with it. */
3923 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
3924         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3925         0x00000000, 0x00000000, 0x00000000, 0x00000000
3926 };
3927 #endif
3928
3929 #define RX_CPU_SCRATCH_BASE     0x30000
3930 #define RX_CPU_SCRATCH_SIZE     0x04000
3931 #define TX_CPU_SCRATCH_BASE     0x34000
3932 #define TX_CPU_SCRATCH_SIZE     0x04000
3933
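/* Halt one of the on-chip CPUs (RX or TX) by writing CPU_MODE_HALT until
 * the mode register reflects it.  The 5705 has no separate TX CPU, hence
 * the BUG() guard.
 */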
3934 /* tp->lock is held. */
3935 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3936 {
3937         int i;
3938
3939         if (offset == TX_CPU_BASE &&
3940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3941                 BUG();
3942
3943         if (offset == RX_CPU_BASE) {
3944                 for (i = 0; i < 10000; i++) {
3945                         tw32(offset + CPU_STATE, 0xffffffff);
3946                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3947                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3948                                 break;
3949                 }
3950
3951                 tw32(offset + CPU_STATE, 0xffffffff);
3952                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3953                 udelay(10);
3954         } else {
3955                 for (i = 0; i < 10000; i++) {
3956                         tw32(offset + CPU_STATE, 0xffffffff);
3957                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3958                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3959                                 break;
3960                 }
3961         }
3962
3963         if (i >= 10000) {
3964                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
3965                        "and %s CPU\n",
3966                        tp->dev->name,
3967                        (offset == RX_CPU_BASE ? "RX" : "TX"));
3968                 return -ENODEV;
3969         }
3970         return 0;
3971 }
3972
3973 struct fw_info {
3974         unsigned int text_base;
3975         unsigned int text_len;
3976         u32 *text_data;
3977         unsigned int rodata_base;
3978         unsigned int rodata_len;
3979         u32 *rodata_data;
3980         unsigned int data_base;
3981         unsigned int data_len;
3982         u32 *data_data;
3983 };
3984
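/* Copy a fw_info image into a CPU's scratch memory: halt the CPU, zero the
 * scratch area, then write the text, rodata and data sections word by word,
 * forcing indirect (config-space) register writes for the duration.
 */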
3985 /* tp->lock is held. */
3986 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
3987                                  int cpu_scratch_size, struct fw_info *info)
3988 {
3989         int err, i;
3990         u32 orig_tg3_flags = tp->tg3_flags;
3991         void (*write_op)(struct tg3 *, u32, u32);
3992
3993         if (cpu_base == TX_CPU_BASE &&
3994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3995                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
3996                        "TX cpu firmware on %s which is 5705.\n",
3997                        tp->dev->name);
3998                 return -EINVAL;
3999         }
4000
4001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4002                 write_op = tg3_write_mem;
4003         else
4004                 write_op = tg3_write_indirect_reg32;
4005
4006         /* Force use of PCI config space for indirect register
4007          * write calls.
4008          */
4009         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4010
4011         err = tg3_halt_cpu(tp, cpu_base);
4012         if (err)
4013                 goto out;
4014
4015         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4016                 write_op(tp, cpu_scratch_base + i, 0);
4017         tw32(cpu_base + CPU_STATE, 0xffffffff);
4018         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4019         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4020                 write_op(tp, (cpu_scratch_base +
4021                               (info->text_base & 0xffff) +
4022                               (i * sizeof(u32))),
4023                          (info->text_data ?
4024                           info->text_data[i] : 0));
4025         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4026                 write_op(tp, (cpu_scratch_base +
4027                               (info->rodata_base & 0xffff) +
4028                               (i * sizeof(u32))),
4029                          (info->rodata_data ?
4030                           info->rodata_data[i] : 0));
4031         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4032                 write_op(tp, (cpu_scratch_base +
4033                               (info->data_base & 0xffff) +
4034                               (i * sizeof(u32))),
4035                          (info->data_data ?
4036                           info->data_data[i] : 0));
4037
4038         err = 0;
4039
4040 out:
4041         tp->tg3_flags = orig_tg3_flags;
4042         return err;
4043 }
4044
4045 /* tp->lock is held. */
4046 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4047 {
4048         struct fw_info info;
4049         int err, i;
4050
4051         info.text_base = TG3_FW_TEXT_ADDR;
4052         info.text_len = TG3_FW_TEXT_LEN;
4053         info.text_data = &tg3FwText[0];
4054         info.rodata_base = TG3_FW_RODATA_ADDR;
4055         info.rodata_len = TG3_FW_RODATA_LEN;
4056         info.rodata_data = &tg3FwRodata[0];
4057         info.data_base = TG3_FW_DATA_ADDR;
4058         info.data_len = TG3_FW_DATA_LEN;
4059         info.data_data = NULL;
4060
4061         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4062                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4063                                     &info);
4064         if (err)
4065                 return err;
4066
4067         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4068                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4069                                     &info);
4070         if (err)
4071                 return err;
4072
4073         /* Now startup only the RX cpu. */
4074         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4075         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4076
4077         for (i = 0; i < 5; i++) {
4078                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4079                         break;
4080                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4081                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4082                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4083                 udelay(1000);
4084         }
4085         if (i >= 5) {
4086                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4087                        "to set RX CPU PC, is %08x should be %08x\n",
4088                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4089                        TG3_FW_TEXT_ADDR);
4090                 return -ENODEV;
4091         }
4092         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4093         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4094
4095         return 0;
4096 }
4097
4098 #if TG3_TSO_SUPPORT != 0
4099
4100 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4101 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4102 #define TG3_TSO_FW_RELEASE_FIX          0x0
4103 #define TG3_TSO_FW_START_ADDR           0x08000000
4104 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4105 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4106 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4107 #define TG3_TSO_FW_RODATA_LEN           0x60
4108 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4109 #define TG3_TSO_FW_DATA_LEN             0x30
4110 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4111 #define TG3_TSO_FW_SBSS_LEN             0x2c
4112 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4113 #define TG3_TSO_FW_BSS_LEN              0x894
4114
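/* TSO firmware image for the TX CPU (what appears to be MIPS machine
 * code), split into the text, rodata and data segments described by the
 * TG3_TSO_FW_* macros above.
 */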
4115 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4116         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4117         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4118         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4119         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4120         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4121         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4122         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4123         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4124         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4125         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4126         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4127         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4128         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4129         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4130         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4131         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4132         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4133         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4134         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4135         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4136         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4137         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4138         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4139         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4140         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4141         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4142         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4143         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4144         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4145         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4146         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4147         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4148         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4149         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4150         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4151         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4152         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4153         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4154         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4155         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4156         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4157         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4158         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4159         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4160         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4161         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4162         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4163         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4164         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4165         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4166         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4167         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4168         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4169         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4170         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4171         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4172         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4173         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4174         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4175         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4176         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4177         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4178         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4179         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4180         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4181         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4182         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4183         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4184         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4185         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4186         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4187         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4188         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4189         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4190         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4191         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4192         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4193         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4194         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4195         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4196         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4197         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4198         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4199         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4200         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4201         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4202         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4203         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4204         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4205         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4206         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4207         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4208         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4209         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4210         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4211         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4212         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4213         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4214         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4215         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4216         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4217         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4218         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4219         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4220         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4221         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4222         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4223         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4224         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4225         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4226         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4227         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4228         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4229         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4230         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4231         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4232         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4233         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4234         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4235         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4236         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4237         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4238         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4239         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4240         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4241         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4242         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4243         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4244         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4245         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4246         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4247         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4248         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4249         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4250         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4251         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4252         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4253         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4254         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4255         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4256         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4257         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4258         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4259         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4260         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4261         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4262         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4263         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4264         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4265         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4266         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4267         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4268         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4269         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4270         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4271         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4272         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4273         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4274         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4275         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4276         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4277         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4278         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4279         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4280         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4281         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4282         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4283         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4284         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4285         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4286         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4287         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4288         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4289         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4290         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4291         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4292         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4293         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4294         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4295         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4296         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4297         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4298         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4299         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4300         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4301         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4302         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4303         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4304         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4305         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4306         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4307         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4308         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4309         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4310         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4311         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4312         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4313         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4314         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4315         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4316         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4317         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4318         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4319         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4320         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4321         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4322         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4323         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4324         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4325         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4326         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4327         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4328         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4329         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4330         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4331         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4332         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4333         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4334         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4335         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4336         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4337         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4338         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4339         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4340         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4341         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4342         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4343         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4344         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4345         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4346         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4347         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4348         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4349         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4350         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4351         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4352         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4353         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4354         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4355         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4356         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4357         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4358         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4359         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4360         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4361         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4362         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4363         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4364         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4365         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4366         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4367         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4368         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4369         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4370         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4371         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4372         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4373         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4374         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4375         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4376         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4377         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4378         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4379         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4380         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4381         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4382         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4383         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4384         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4385         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4386         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4387         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4388         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4389         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4390         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4391         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4392         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4393         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4394         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4395         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4396         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4397         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4398         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4399         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4400 };
4401
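/* Read-only data segment of the TSO firmware; the words are mostly
 * packed ASCII tags (e.g. "Main CpuB", "stkoffld", "fatalErr") used by
 * the firmware itself.
 */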
4402 static u32 tg3TsoFwRodata[(TG3_TSO_FW_RODATA_LEN / 4) + 1] = {
4403         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4404         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4405         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4406         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4407         0x00000000,
4408 };
4409
4410 static u32 tg3TsoFwData[(TG3_TSO_FW_DATA_LEN / 4) + 1] = {
4411         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4412         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4413         0x00000000,
4414 };
4415
4416 /* 5705 needs a special version of the TSO firmware.  */
4417 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4418 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4419 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4420 #define TG3_TSO5_FW_START_ADDR          0x00010000
4421 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4422 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4423 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4424 #define TG3_TSO5_FW_RODATA_LEN          0x50
4425 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4426 #define TG3_TSO5_FW_DATA_LEN            0x20
4427 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4428 #define TG3_TSO5_FW_SBSS_LEN            0x28
4429 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4430 #define TG3_TSO5_FW_BSS_LEN             0x88
4431
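/* 5705-specific TSO firmware image.  tg3_load_tso_firmware() below loads
 * this variant onto the RX CPU, using a slice of the mbuf pool SRAM as
 * the download area.
 */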
4432 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4433         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4434         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4435         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4436         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4437         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4438         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4439         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4440         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4441         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4442         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4443         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4444         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4445         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4446         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4447         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4448         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4449         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4450         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4451         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4452         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4453         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4454         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4455         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4456         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4457         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4458         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4459         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4460         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4461         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4462         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4463         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4464         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4465         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4466         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4467         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4468         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4469         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4470         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4471         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4472         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4473         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4474         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4475         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4476         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4477         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4478         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4479         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4480         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4481         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4482         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4483         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4484         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4485         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4486         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4487         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4488         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4489         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4490         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4491         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4492         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4493         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4494         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4495         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4496         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4497         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4498         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4499         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4500         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4501         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4502         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4503         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4504         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4505         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4506         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4507         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4508         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4509         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4510         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4511         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4512         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4513         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4514         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4515         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4516         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4517         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4518         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4519         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4520         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4521         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4522         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4523         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4524         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4525         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4526         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4527         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4528         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4529         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4530         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4531         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4532         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4533         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4534         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4535         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4536         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4537         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4538         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4539         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4540         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4541         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4542         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4543         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4544         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4545         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4546         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4547         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4548         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4549         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4550         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4551         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4552         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4553         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4554         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4555         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4556         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4557         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4558         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4559         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4560         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4561         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4562         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4563         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4564         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4565         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4566         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4567         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4568         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4569         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4570         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4571         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4572         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4573         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4574         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4575         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4576         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4577         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4578         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4579         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4580         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4581         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4582         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4583         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4584         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4585         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4586         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4587         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4588         0x00000000, 0x00000000, 0x00000000,
4589 };
4590
4591 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4592         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4593         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4594         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4595         0x00000000, 0x00000000, 0x00000000,
4596 };
4597
4598 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4599         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4600         0x00000000, 0x00000000, 0x00000000,
4601 };
4602
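/* Pick and load the right TSO firmware: the 5750 is skipped entirely,
 * the 5705 gets the TSO5 image on the RX CPU with scratch space carved
 * out of the mbuf pool, and all other chips get the standard image on
 * the TX CPU.  After the download the CPU's PC is pointed at the
 * firmware entry point, verified, and the CPU is taken out of halt.
 */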
4603 /* tp->lock is held. */
4604 static int tg3_load_tso_firmware(struct tg3 *tp)
4605 {
4606         struct fw_info info;
4607         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4608         int err, i;
4609
4610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4611                 return 0;
4612
4613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4614                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4615                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4616                 info.text_data = &tg3Tso5FwText[0];
4617                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4618                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4619                 info.rodata_data = &tg3Tso5FwRodata[0];
4620                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4621                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4622                 info.data_data = &tg3Tso5FwData[0];
4623                 cpu_base = RX_CPU_BASE;
4624                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4625                 cpu_scratch_size = (info.text_len +
4626                                     info.rodata_len +
4627                                     info.data_len +
4628                                     TG3_TSO5_FW_SBSS_LEN +
4629                                     TG3_TSO5_FW_BSS_LEN);
4630         } else {
4631                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4632                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4633                 info.text_data = &tg3TsoFwText[0];
4634                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4635                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4636                 info.rodata_data = &tg3TsoFwRodata[0];
4637                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4638                 info.data_len = TG3_TSO_FW_DATA_LEN;
4639                 info.data_data = &tg3TsoFwData[0];
4640                 cpu_base = TX_CPU_BASE;
4641                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4642                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4643         }
4644
4645         err = tg3_load_firmware_cpu(tp, cpu_base,
4646                                     cpu_scratch_base, cpu_scratch_size,
4647                                     &info);
4648         if (err)
4649                 return err;
4650
4651         /* Now start up the CPU. */
4652         tw32(cpu_base + CPU_STATE, 0xffffffff);
4653         tw32_f(cpu_base + CPU_PC,    info.text_base);
4654
4655         for (i = 0; i < 5; i++) {
4656                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4657                         break;
4658                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4659                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4660                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4661                 udelay(1000);
4662         }
4663         if (i >= 5) {
4664                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
4665                        "to set CPU PC, is %08x should be %08x\n",
4666                        tp->dev->name, tr32(cpu_base + CPU_PC),
4667                        info.text_base);
4668                 return -ENODEV;
4669         }
4670         tw32(cpu_base + CPU_STATE, 0xffffffff);
4671         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4672         return 0;
4673 }
4674
4675 #endif /* TG3_TSO_SUPPORT != 0 */
4676
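/* Program the station address into the MAC.  Bytes 0-1 go into the HIGH
 * half and bytes 2-5 into the LOW half of each of the four MAC_ADDR_*
 * register pairs; a hypothetical address 00:10:18:a1:b2:c3 would give
 * addr_high = 0x00000010 and addr_low = 0x18a1b2c3.  The byte sum of the
 * address seeds the transmit backoff generator.
 */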
4677 /* tp->lock is held. */
4678 static void __tg3_set_mac_addr(struct tg3 *tp)
4679 {
4680         u32 addr_high, addr_low;
4681         int i;
4682
4683         addr_high = ((tp->dev->dev_addr[0] << 8) |
4684                      tp->dev->dev_addr[1]);
4685         addr_low = ((tp->dev->dev_addr[2] << 24) |
4686                     (tp->dev->dev_addr[3] << 16) |
4687                     (tp->dev->dev_addr[4] <<  8) |
4688                     (tp->dev->dev_addr[5] <<  0));
4689         for (i = 0; i < 4; i++) {
4690                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4691                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4692         }
4693
4694         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4695             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4696             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4697                 for (i = 0; i < 12; i++) {
4698                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4699                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4700                 }
4701         }
4702
4703         addr_high = (tp->dev->dev_addr[0] +
4704                      tp->dev->dev_addr[1] +
4705                      tp->dev->dev_addr[2] +
4706                      tp->dev->dev_addr[3] +
4707                      tp->dev->dev_addr[4] +
4708                      tp->dev->dev_addr[5]) &
4709                 TX_BACKOFF_SEED_MASK;
4710         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4711 }
4712
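/* Presumably installed as dev->set_mac_address: copy the new address
 * into the net_device and reprogram the MAC registers under tp->lock.
 */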
4713 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4714 {
4715         struct tg3 *tp = netdev_priv(dev);
4716         struct sockaddr *addr = p;
4717
4718         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4719
4720         spin_lock_irq(&tp->lock);
4721         __tg3_set_mac_addr(tp);
4722         spin_unlock_irq(&tp->lock);
4723
4724         return 0;
4725 }
4726
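/* Write one TG3_BDINFO block describing a ring: the 64-bit host DMA
 * address split into high/low halves, the maxlen/flags word, and the
 * ring's NIC SRAM address (omitted on the 5705).
 */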
4727 /* tp->lock is held. */
4728 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4729                            dma_addr_t mapping, u32 maxlen_flags,
4730                            u32 nic_addr)
4731 {
4732         tg3_write_mem(tp,
4733                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4734                       ((u64) mapping >> 32));
4735         tg3_write_mem(tp,
4736                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4737                       ((u64) mapping & 0xffffffff));
4738         tg3_write_mem(tp,
4739                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4740                        maxlen_flags);
4741
4742         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4743                 tg3_write_mem(tp,
4744                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4745                               nic_addr);
4746 }
4747
4748 static void __tg3_set_rx_mode(struct net_device *);
4749
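/* Bring the chip through a full reset and reprogram it from scratch:
 * stop the on-chip firmware, abort any running DMA, reset the core,
 * re-initialize the rings, buffer manager and BDINFO blocks, restore
 * the MAC address and receive rules, and rebuild the DMA engine modes.
 */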
4750 /* tp->lock is held. */
4751 static int tg3_reset_hw(struct tg3 *tp)
4752 {
4753         u32 val, rdmac_mode;
4754         int i, err, limit;
4755
4756         tg3_disable_ints(tp);
4757
4758         tg3_stop_fw(tp);
4759
4760         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4761
4762         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4763                 err = tg3_abort_hw(tp);
4764                 if (err)
4765                         return err;
4766         }
4767
4768         err = tg3_chip_reset(tp);
4769         if (err)
4770                 return err;
4771
4772         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4773
4774         /* This works around an issue with Athlon chipsets on
4775          * B3 tigon3 silicon.  This bit has no effect on any
4776          * other revision.  But do not set this on PCI Express
4777          * chips.
4778          */
4779         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4780                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4781         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4782
4783         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4784             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4785                 val = tr32(TG3PCI_PCISTATE);
4786                 val |= PCISTATE_RETRY_SAME_DMA;
4787                 tw32(TG3PCI_PCISTATE, val);
4788         }
4789
4790         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4791                 /* Enable some hw fixes.  */
4792                 val = tr32(TG3PCI_MSI_DATA);
4793                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4794                 tw32(TG3PCI_MSI_DATA, val);
4795         }
4796
4797         /* Descriptor ring init may make accesses to the
4798          * NIC SRAM area to set up the TX descriptors, so we
4799          * can only do this after the hardware has been
4800          * successfully reset.
4801          */
4802         tg3_init_rings(tp);
4803
4804         /* This value is determined during the probe-time DMA
4805          * engine test, tg3_test_dma.
4806          */
4807         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4808
4809         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4810                           GRC_MODE_4X_NIC_SEND_RINGS |
4811                           GRC_MODE_NO_TX_PHDR_CSUM |
4812                           GRC_MODE_NO_RX_PHDR_CSUM);
4813         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
4814                 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4815         else
4816                 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
4817         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4818                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4819         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4820                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4821
4822         tw32(GRC_MODE,
4823              tp->grc_mode |
4824              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4825
4826         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
4827         val = tr32(GRC_MISC_CFG);
4828         val &= ~0xff;
4829         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4830         tw32(GRC_MISC_CFG, val);
4831
4832         /* Initialize MBUF/DESC pool. */
4833         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4834                 /* Do nothing.  */
4835         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4836                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4837                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4838                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4839                 else
4840                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4841                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4842                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4843         }
4844 #if TG3_TSO_SUPPORT != 0
4845         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4846                 int fw_len;
4847
4848                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4849                           TG3_TSO5_FW_RODATA_LEN +
4850                           TG3_TSO5_FW_DATA_LEN +
4851                           TG3_TSO5_FW_SBSS_LEN +
4852                           TG3_TSO5_FW_BSS_LEN);
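                /* Round the firmware footprint up to a 128-byte boundary
                 * before carving it out of the front of the mbuf pool.
                 */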
4853                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4854                 tw32(BUFMGR_MB_POOL_ADDR,
4855                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4856                 tw32(BUFMGR_MB_POOL_SIZE,
4857                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4858         }
4859 #endif
4860
4861         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4862                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4863                      tp->bufmgr_config.mbuf_read_dma_low_water);
4864                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4865                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4866                 tw32(BUFMGR_MB_HIGH_WATER,
4867                      tp->bufmgr_config.mbuf_high_water);
4868         } else {
4869                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4870                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4871                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4872                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4873                 tw32(BUFMGR_MB_HIGH_WATER,
4874                      tp->bufmgr_config.mbuf_high_water_jumbo);
4875         }
4876         tw32(BUFMGR_DMA_LOW_WATER,
4877              tp->bufmgr_config.dma_low_water);
4878         tw32(BUFMGR_DMA_HIGH_WATER,
4879              tp->bufmgr_config.dma_high_water);
4880
4881         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4882         for (i = 0; i < 2000; i++) {
4883                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4884                         break;
4885                 udelay(10);
4886         }
4887         if (i >= 2000) {
4888                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4889                        tp->dev->name);
4890                 return -ENODEV;
4891         }
4892
4893         /* Set up the standard ring replenish threshold. */
4894         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4895
4896         /* Initialize TG3_BDINFO's at:
4897          *  RCVDBDI_STD_BD:     standard eth size rx ring
4898          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
4899          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
4900          *
4901          * like so:
4902          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
4903          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
4904          *                              ring attribute flags
4905          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
4906          *
4907          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
4908          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
4909          *
4910          * The size of each ring is fixed in the firmware, but the location is
4911          * configurable.
4912          */
4913         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4914              ((u64) tp->rx_std_mapping >> 32));
4915         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4916              ((u64) tp->rx_std_mapping & 0xffffffff));
4917         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
4918              NIC_SRAM_RX_BUFFER_DESC);
4919
4920         /* Don't even try to program the JUMBO/MINI buffer descriptor
4921          * configs on the 5705 or 5750.
4922          */
4923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
4924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4925                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4926                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
4927         } else {
4928                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4929                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4930
4931                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
4932                      BDINFO_FLAGS_DISABLED);
4933
4934                 /* Set up the jumbo ring replenish threshold. */
4935                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
4936
4937                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
4938                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4939                              ((u64) tp->rx_jumbo_mapping >> 32));
4940                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4941                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
4942                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4943                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4944                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
4945                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
4946                 } else {
4947                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4948                              BDINFO_FLAGS_DISABLED);
4949                 }
4950
4951         }
4952
4953         /* There is only one send ring on 5705/5750, no need to explicitly
4954          * disable the others.
4955          */
4956         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
4957             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
4958                 /* Clear out send RCB ring in SRAM. */
4959                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
4960                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4961                                       BDINFO_FLAGS_DISABLED);
4962         }
4963
4964         tp->tx_prod = 0;
4965         tp->tx_cons = 0;
4966         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4967         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4968
4969         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
4970                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4971                                tp->tx_desc_mapping,
4972                                (TG3_TX_RING_SIZE <<
4973                                 BDINFO_FLAGS_MAXLEN_SHIFT),
4974                                NIC_SRAM_TX_BUFFER_DESC);
4975         } else {
4976                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4977                                0,
4978                                BDINFO_FLAGS_DISABLED,
4979                                NIC_SRAM_TX_BUFFER_DESC);
4980         }
4981
4982         /* There is only one receive return ring on 5705/5750, no need
4983          * to explicitly disable the others.
4984          */
4985         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
4986             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
4987                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
4988                      i += TG3_BDINFO_SIZE) {
4989                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4990                                       BDINFO_FLAGS_DISABLED);
4991                 }
4992         }
4993
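        /* Reset the receive return ring consumer index and point the
         * hardware at the host-resident return ring control block.
         */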
4994         tp->rx_rcb_ptr = 0;
4995         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
4996
4997         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
4998                        tp->rx_rcb_mapping,
4999                        (TG3_RX_RCB_RING_SIZE(tp) <<
5000                         BDINFO_FLAGS_MAXLEN_SHIFT),
5001                        0);
5002
5003         tp->rx_std_ptr = tp->rx_pending;
5004         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5005                      tp->rx_std_ptr);
5006
5007         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5008                                                 tp->rx_jumbo_pending : 0;
5009         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5010                      tp->rx_jumbo_ptr);
5011
5012         /* Initialize MAC address and backoff seed. */
5013         __tg3_set_mac_addr(tp);
5014
5015         /* MTU + ethernet header + FCS + optional VLAN tag */
5016         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5017
5018         /* The slot time is changed by tg3_setup_phy if we
5019          * run at gigabit with half duplex.
5020          */
5021         tw32(MAC_TX_LENGTHS,
5022              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5023              (6 << TX_LENGTHS_IPG_SHIFT) |
5024              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5025
5026         /* Receive rules. */
5027         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5028         tw32(RCVLPC_CONFIG, 0x0181);
5029
5030         /* Calculate RDMAC_MODE setting early, we need it to determine
5031          * the RCVLPC_STATE_ENABLE mask.
5032          */
5033         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5034                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5035                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5036                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5037                       RDMAC_MODE_LNGREAD_ENAB);
5038         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5039                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5040         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5041              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5042             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5043                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5044                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5045                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5046                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5047                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5048                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5049                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5050                 }
5051         }
5052
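        /* With TSO support compiled in, an extra read DMA mode bit
         * (bit 27) is also set for the 5750.
         */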
5053 #if TG3_TSO_SUPPORT != 0
5054         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5055                 rdmac_mode |= (1 << 27);
5056 #endif
5057
5058         /* Receive/send statistics. */
5059         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5060             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5061                 val = tr32(RCVLPC_STATS_ENABLE);
5062                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5063                 tw32(RCVLPC_STATS_ENABLE, val);
5064         } else {
5065                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5066         }
5067         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5068         tw32(SNDDATAI_STATSENAB, 0xffffff);
5069         tw32(SNDDATAI_STATSCTRL,
5070              (SNDDATAI_SCTRL_ENABLE |
5071               SNDDATAI_SCTRL_FASTUPD));
5072
5073         /* Setup host coalescing engine. */
5074         tw32(HOSTCC_MODE, 0);
5075         for (i = 0; i < 2000; i++) {
5076                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5077                         break;
5078                 udelay(10);
5079         }
5080
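        /* Program default interrupt coalescing parameters.  The
         * coalesced-tick-during-interrupt registers below are only
         * written for chips other than the 5705/5750.
         */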
5081         tw32(HOSTCC_RXCOL_TICKS, 0);
5082         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5083         tw32(HOSTCC_RXMAX_FRAMES, 1);
5084         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5085         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5086             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5087                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5088                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5089         }
5090         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5091         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5092
5093         /* set status block DMA address */
5094         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5095              ((u64) tp->status_mapping >> 32));
5096         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5097              ((u64) tp->status_mapping & 0xffffffff));
5098
5099         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5100             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5101                 /* Status/statistics block address.  See tg3_timer,
5102                  * the tg3_periodic_fetch_stats call there, and
5103                  * tg3_get_stats to see how this works for 5705/5750 chips.
5104                  */
5105                 tw32(HOSTCC_STAT_COAL_TICKS,
5106                      DEFAULT_STAT_COAL_TICKS);
5107                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5108                      ((u64) tp->stats_mapping >> 32));
5109                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5110                      ((u64) tp->stats_mapping & 0xffffffff));
5111                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5112                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5113         }
5114
5115         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5116
5117         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5118         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5119         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5120             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5121                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5122
5123         /* Clear statistics/status block in chip, and status block in ram. */
5124         for (i = NIC_SRAM_STATS_BLK;
5125              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5126              i += sizeof(u32)) {
5127                 tg3_write_mem(tp, i, 0);
5128                 udelay(40);
5129         }
5130         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5131
5132         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5133                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5134         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5135         udelay(40);
5136
5137         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5138         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5139                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5140                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5141         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5142         udelay(100);
5143
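        /* Zero the interrupt mailbox and read it back so the write is
         * flushed to the chip.
         */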
5144         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5145         tr32(MAILBOX_INTERRUPT_0);
5146
5147         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5148             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5149                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5150                 udelay(40);
5151         }
5152
5153         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5154                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5155                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5156                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5157                WDMAC_MODE_LNGREAD_ENAB);
5158
5159         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5160              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5162                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5163                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5164                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5165                         /* nothing */
5166                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5167                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5168                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5169                         val |= WDMAC_MODE_RX_ACCEL;
5170                 }
5171         }
5172
5173         tw32_f(WDMAC_MODE, val);
5174         udelay(40);
5175
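        /* When running in PCI-X mode, raise the maximum burst size in the
         * PCI-X capability register; on the 5704 the split transaction
         * limit is also programmed when split mode is enabled.
         */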
5176         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5177                 val = tr32(TG3PCI_X_CAPS);
5178                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5179                         val &= ~PCIX_CAPS_BURST_MASK;
5180                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5181                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5182                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5183                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5184                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5185                                 val |= (tp->split_mode_max_reqs <<
5186                                         PCIX_CAPS_SPLIT_SHIFT);
5187                 }
5188                 tw32(TG3PCI_X_CAPS, val);
5189         }
5190
5191         tw32_f(RDMAC_MODE, rdmac_mode);
5192         udelay(40);
5193
5194         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5195         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5196             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5197                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5198         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5199         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5200         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5201         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5202         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5203 #if TG3_TSO_SUPPORT != 0
5204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5205                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5206 #endif
5207         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5208         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5209
5210         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5211                 err = tg3_load_5701_a0_firmware_fix(tp);
5212                 if (err)
5213                         return err;
5214         }
5215
5216 #if TG3_TSO_SUPPORT != 0
5217         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5218                 err = tg3_load_tso_firmware(tp);
5219                 if (err)
5220                         return err;
5221         }
5222 #endif
5223
5224         tp->tx_mode = TX_MODE_ENABLE;
5225         tw32_f(MAC_TX_MODE, tp->tx_mode);
5226         udelay(100);
5227
5228         tp->rx_mode = RX_MODE_ENABLE;
5229         tw32_f(MAC_RX_MODE, tp->rx_mode);
5230         udelay(10);
5231
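        /* If the PHY was previously placed in a low-power state, restore
         * the link parameters that were saved at that time.
         */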
5232         if (tp->link_config.phy_is_low_power) {
5233                 tp->link_config.phy_is_low_power = 0;
5234                 tp->link_config.speed = tp->link_config.orig_speed;
5235                 tp->link_config.duplex = tp->link_config.orig_duplex;
5236                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5237         }
5238
5239         tp->mi_mode = MAC_MI_MODE_BASE;
5240         tw32_f(MAC_MI_MODE, tp->mi_mode);
5241         udelay(80);
5242
5243         tw32(MAC_LED_CTRL, tp->led_ctrl);
5244
5245         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5246         if (tp->phy_id == PHY_ID_SERDES) {
5247                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5248                 udelay(10);
5249         }
5250         tw32_f(MAC_RX_MODE, tp->rx_mode);
5251         udelay(10);
5252
5253         if (tp->phy_id == PHY_ID_SERDES) {
5254                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5255                         /* Set drive transmission level to 1.2V  */
5256                         val = tr32(MAC_SERDES_CFG);
5257                         val &= 0xfffff000;
5258                         val |= 0x880;
5259                         tw32(MAC_SERDES_CFG, val);
5260                 }
5261                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5262                         tw32(MAC_SERDES_CFG, 0x616000);
5263         }
5264
5265         /* Prevent chip from dropping frames when flow control
5266          * is enabled.
5267          */
5268         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5269
5270         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5271             tp->phy_id == PHY_ID_SERDES) {
5272                 /* Enable hardware link auto-negotiation */
5273                 u32 digctrl, txctrl;
5274
5275                 digctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_CRC16_CLEAR_N |
5276                     SG_DIG_LOCAL_DUPLEX_STATUS | SG_DIG_LOCAL_LINK_STATUS |
5277                     (2 << SG_DIG_SPEED_STATUS_SHIFT) | SG_DIG_FIBER_MODE |
5278                     SG_DIG_GBIC_ENABLE;
5279
5280                 txctrl = tr32(MAC_SERDES_CFG);
5281                 tw32_f(MAC_SERDES_CFG, txctrl | MAC_SERDES_CFG_EDGE_SELECT);
5282                 tw32_f(SG_DIG_CTRL, digctrl | SG_DIG_SOFT_RESET);
5283                 tr32(SG_DIG_CTRL);
5284                 udelay(5);
5285                 tw32_f(SG_DIG_CTRL, digctrl);
5286
5287                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5288         }
5289
5290         err = tg3_setup_phy(tp, 1);
5291         if (err)
5292                 return err;
5293
5294         if (tp->phy_id != PHY_ID_SERDES) {
5295                 u32 tmp;
5296
5297                 /* Clear CRC stats. */
5298                 tg3_readphy(tp, 0x1e, &tmp);
5299                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5300                 tg3_readphy(tp, 0x14, &tmp);
5301         }
5302
5303         __tg3_set_rx_mode(tp->dev);
5304
5305         /* Initialize receive rules. */
5306         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5307         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5308         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5309         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5310
5311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5312             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5313                 limit = 8;
5314         else
5315                 limit = 16;
5316         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5317                 limit -= 4;
5318         switch (limit) {
5319         case 16:
5320                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5321         case 15:
5322                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5323         case 14:
5324                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5325         case 13:
5326                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5327         case 12:
5328                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5329         case 11:
5330                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5331         case 10:
5332                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5333         case 9:
5334                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5335         case 8:
5336                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5337         case 7:
5338                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5339         case 6:
5340                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5341         case 5:
5342                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5343         case 4:
5344                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5345         case 3:
5346                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5347         case 2:
5348         case 1:
5349
5350         default:
5351                 break;
5352         }
5353
5354         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5355
5356         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5357                 tg3_enable_ints(tp);
5358
5359         return 0;
5360 }
5361
5362 /* Called at device open time to get the chip ready for
5363  * packet processing.  Invoked with tp->lock held.
5364  */
5365 static int tg3_init_hw(struct tg3 *tp)
5366 {
5367         int err;
5368
5369         /* Force the chip into D0. */
5370         err = tg3_set_power_state(tp, 0);
5371         if (err)
5372                 goto out;
5373
5374         tg3_switch_clocks(tp);
5375
5376         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5377
5378         err = tg3_reset_hw(tp);
5379
5380 out:
5381         return err;
5382 }
5383
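/* Accumulate a 32-bit hardware statistics register into a 64-bit software
 * counter, carrying into the high word when the low word wraps.
 */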
5384 #define TG3_STAT_ADD32(PSTAT, REG) \
5385 do {    u32 __val = tr32(REG); \
5386         (PSTAT)->low += __val; \
5387         if ((PSTAT)->low < __val) \
5388                 (PSTAT)->high += 1; \
5389 } while (0)
5390
5391 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5392 {
5393         struct tg3_hw_stats *sp = tp->hw_stats;
5394
5395         if (!netif_carrier_ok(tp->dev))
5396                 return;
5397
5398         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5399         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5400         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5401         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5402         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5403         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5404         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5405         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5406         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5407         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5408         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5409         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5410         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5411
5412         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5413         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5414         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5415         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5416         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5417         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5418         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5419         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5420         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5421         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5422         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5423         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5424         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5425         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5426 }
5427
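/* Periodic driver timer.  It works around races in the non-tagged status
 * block protocol, schedules a chip reset if the write DMA engine has
 * stopped, fetches statistics on 5705/5750, polls link state once per
 * second and emits the ASF heartbeat.
 */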
5428 static void tg3_timer(unsigned long __opaque)
5429 {
5430         struct tg3 *tp = (struct tg3 *) __opaque;
5431         unsigned long flags;
5432
5433         spin_lock_irqsave(&tp->lock, flags);
5434         spin_lock(&tp->tx_lock);
5435
5436         /* All of this garbage is needed because, when using non-tagged
5437          * IRQ status, the mailbox/status_block protocol the chip uses
5438          * with the CPU is race prone.
5439          */
5440         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5441                 tw32(GRC_LOCAL_CTRL,
5442                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5443         } else {
5444                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5445                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5446         }
5447
5448         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5449                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5450                 spin_unlock(&tp->tx_lock);
5451                 spin_unlock_irqrestore(&tp->lock, flags);
5452                 schedule_work(&tp->reset_task);
5453                 return;
5454         }
5455
5456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5457             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5458                 tg3_periodic_fetch_stats(tp);
5459
5460         /* This part only runs once per second. */
5461         if (!--tp->timer_counter) {
5462                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5463                         u32 mac_stat;
5464                         int phy_event;
5465
5466                         mac_stat = tr32(MAC_STATUS);
5467
5468                         phy_event = 0;
5469                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5470                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5471                                         phy_event = 1;
5472                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5473                                 phy_event = 1;
5474
5475                         if (phy_event)
5476                                 tg3_setup_phy(tp, 0);
5477                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5478                         u32 mac_stat = tr32(MAC_STATUS);
5479                         int need_setup = 0;
5480
5481                         if (netif_carrier_ok(tp->dev) &&
5482                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5483                                 need_setup = 1;
5484                         }
5485                         if (!netif_carrier_ok(tp->dev) &&
5486                             (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5487                                 need_setup = 1;
5488                         }
5489                         if (need_setup) {
5490                                 tw32_f(MAC_MODE,
5491                                      (tp->mac_mode &
5492                                       ~MAC_MODE_PORT_MODE_MASK));
5493                                 udelay(40);
5494                                 tw32_f(MAC_MODE, tp->mac_mode);
5495                                 udelay(40);
5496                                 tg3_setup_phy(tp, 0);
5497                         }
5498                 }
5499
5500                 tp->timer_counter = tp->timer_multiplier;
5501         }
5502
5503         /* Heartbeat is only sent once every 120 seconds.  */
5504         if (!--tp->asf_counter) {
5505                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5506                         u32 val;
5507
5508                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5509                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5510                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5511                         val = tr32(GRC_RX_CPU_EVENT);
5512                         val |= (1 << 14);
5513                         tw32(GRC_RX_CPU_EVENT, val);
5514                 }
5515                 tp->asf_counter = tp->asf_multiplier;
5516         }
5517
5518         spin_unlock(&tp->tx_lock);
5519         spin_unlock_irqrestore(&tp->lock, flags);
5520
5521         tp->timer.expires = jiffies + tp->timer_offset;
5522         add_timer(&tp->timer);
5523 }
5524
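/* net_device open callback: allocate the DMA-consistent rings, attach the
 * interrupt handler, program the hardware and start the periodic driver
 * timer.
 */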
5525 static int tg3_open(struct net_device *dev)
5526 {
5527         struct tg3 *tp = netdev_priv(dev);
5528         int err;
5529
5530         spin_lock_irq(&tp->lock);
5531         spin_lock(&tp->tx_lock);
5532
5533         tg3_disable_ints(tp);
5534         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5535
5536         spin_unlock(&tp->tx_lock);
5537         spin_unlock_irq(&tp->lock);
5538
5539         /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5540          * tp->tg3_flags is still accurate at the new location.
5541          */
5542         err = tg3_alloc_consistent(tp);
5543         if (err)
5544                 return err;
5545
5546         err = request_irq(dev->irq, tg3_interrupt,
5547                           SA_SHIRQ, dev->name, dev);
5548
5549         if (err) {
5550                 tg3_free_consistent(tp);
5551                 return err;
5552         }
5553
5554         spin_lock_irq(&tp->lock);
5555         spin_lock(&tp->tx_lock);
5556
5557         err = tg3_init_hw(tp);
5558         if (err) {
5559                 tg3_halt(tp);
5560                 tg3_free_rings(tp);
5561         } else {
5562                 tp->timer_offset = HZ / 10;
5563                 tp->timer_counter = tp->timer_multiplier = 10;
5564                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5565
5566                 init_timer(&tp->timer);
5567                 tp->timer.expires = jiffies + tp->timer_offset;
5568                 tp->timer.data = (unsigned long) tp;
5569                 tp->timer.function = tg3_timer;
5570                 add_timer(&tp->timer);
5571
5572                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5573         }
5574
5575         spin_unlock(&tp->tx_lock);
5576         spin_unlock_irq(&tp->lock);
5577
5578         if (err) {
5579                 free_irq(dev->irq, dev);
5580                 tg3_free_consistent(tp);
5581                 return err;
5582         }
5583
5584         spin_lock_irq(&tp->lock);
5585         spin_lock(&tp->tx_lock);
5586
5587         tg3_enable_ints(tp);
5588
5589         spin_unlock(&tp->tx_lock);
5590         spin_unlock_irq(&tp->lock);
5591
5592         netif_start_queue(dev);
5593
5594         return 0;
5595 }
5596
5597 #if 0
5598 /*static*/ void tg3_dump_state(struct tg3 *tp)
5599 {
5600         u32 val32, val32_2, val32_3, val32_4, val32_5;
5601         u16 val16;
5602         int i;
5603
5604         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5605         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5606         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5607                val16, val32);
5608
5609         /* MAC block */
5610         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5611                tr32(MAC_MODE), tr32(MAC_STATUS));
5612         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5613                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5614         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5615                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5616         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5617                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5618
5619         /* Send data initiator control block */
5620         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5621                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5622         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5623                tr32(SNDDATAI_STATSCTRL));
5624
5625         /* Send data completion control block */
5626         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5627
5628         /* Send BD ring selector block */
5629         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5630                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5631
5632         /* Send BD initiator control block */
5633         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5634                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5635
5636         /* Send BD completion control block */
5637         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5638
5639         /* Receive list placement control block */
5640         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5641                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5642         printk("       RCVLPC_STATSCTRL[%08x]\n",
5643                tr32(RCVLPC_STATSCTRL));
5644
5645         /* Receive data and receive BD initiator control block */
5646         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5647                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5648
5649         /* Receive data completion control block */
5650         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5651                tr32(RCVDCC_MODE));
5652
5653         /* Receive BD initiator control block */
5654         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5655                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5656
5657         /* Receive BD completion control block */
5658         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5659                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5660
5661         /* Receive list selector control block */
5662         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5663                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5664
5665         /* Mbuf cluster free block */
5666         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5667                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5668
5669         /* Host coalescing control block */
5670         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5671                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5672         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5673                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5674                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5675         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5676                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5677                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5678         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5679                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5680         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5681                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5682
5683         /* Memory arbiter control block */
5684         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5685                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5686
5687         /* Buffer manager control block */
5688         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5689                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5690         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5691                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5692         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5693                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5694                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5695                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5696
5697         /* Read DMA control block */
5698         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5699                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5700
5701         /* Write DMA control block */
5702         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5703                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5704
5705         /* DMA completion block */
5706         printk("DEBUG: DMAC_MODE[%08x]\n",
5707                tr32(DMAC_MODE));
5708
5709         /* GRC block */
5710         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5711                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5712         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5713                tr32(GRC_LOCAL_CTRL));
5714
5715         /* TG3_BDINFOs */
5716         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5717                tr32(RCVDBDI_JUMBO_BD + 0x0),
5718                tr32(RCVDBDI_JUMBO_BD + 0x4),
5719                tr32(RCVDBDI_JUMBO_BD + 0x8),
5720                tr32(RCVDBDI_JUMBO_BD + 0xc));
5721         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5722                tr32(RCVDBDI_STD_BD + 0x0),
5723                tr32(RCVDBDI_STD_BD + 0x4),
5724                tr32(RCVDBDI_STD_BD + 0x8),
5725                tr32(RCVDBDI_STD_BD + 0xc));
5726         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5727                tr32(RCVDBDI_MINI_BD + 0x0),
5728                tr32(RCVDBDI_MINI_BD + 0x4),
5729                tr32(RCVDBDI_MINI_BD + 0x8),
5730                tr32(RCVDBDI_MINI_BD + 0xc));
5731
5732         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5733         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5734         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5735         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5736         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5737                val32, val32_2, val32_3, val32_4);
5738
5739         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5740         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5741         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5742         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5743         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5744                val32, val32_2, val32_3, val32_4);
5745
5746         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5747         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5748         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5749         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5750         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5751         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5752                val32, val32_2, val32_3, val32_4, val32_5);
5753
5754         /* SW status block */
5755         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5756                tp->hw_status->status,
5757                tp->hw_status->status_tag,
5758                tp->hw_status->rx_jumbo_consumer,
5759                tp->hw_status->rx_consumer,
5760                tp->hw_status->rx_mini_consumer,
5761                tp->hw_status->idx[0].rx_producer,
5762                tp->hw_status->idx[0].tx_consumer);
5763
5764         /* SW statistics block */
5765         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5766                ((u32 *)tp->hw_stats)[0],
5767                ((u32 *)tp->hw_stats)[1],
5768                ((u32 *)tp->hw_stats)[2],
5769                ((u32 *)tp->hw_stats)[3]);
5770
5771         /* Mailboxes */
5772         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5773                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5774                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5775                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5776                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5777
5778         /* NIC side send descriptors. */
5779         for (i = 0; i < 6; i++) {
5780                 unsigned long txd;
5781
5782                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5783                         + (i * sizeof(struct tg3_tx_buffer_desc));
5784                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5785                        i,
5786                        readl(txd + 0x0), readl(txd + 0x4),
5787                        readl(txd + 0x8), readl(txd + 0xc));
5788         }
5789
5790         /* NIC side RX descriptors. */
5791         for (i = 0; i < 6; i++) {
5792                 unsigned long rxd;
5793
5794                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5795                         + (i * sizeof(struct tg3_rx_buffer_desc));
5796                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5797                        i,
5798                        readl(rxd + 0x0), readl(rxd + 0x4),
5799                        readl(rxd + 0x8), readl(rxd + 0xc));
5800                 rxd += (4 * sizeof(u32));
5801                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5802                        i,
5803                        readl(rxd + 0x0), readl(rxd + 0x4),
5804                        readl(rxd + 0x8), readl(rxd + 0xc));
5805         }
5806
5807         for (i = 0; i < 6; i++) {
5808                 unsigned long rxd;
5809
5810                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5811                         + (i * sizeof(struct tg3_rx_buffer_desc));
5812                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5813                        i,
5814                        readl(rxd + 0x0), readl(rxd + 0x4),
5815                        readl(rxd + 0x8), readl(rxd + 0xc));
5816                 rxd += (4 * sizeof(u32));
5817                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5818                        i,
5819                        readl(rxd + 0x0), readl(rxd + 0x4),
5820                        readl(rxd + 0x8), readl(rxd + 0xc));
5821         }
5822 }
5823 #endif
5824
5825 static struct net_device_stats *tg3_get_stats(struct net_device *);
5826 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5827
5828 static int tg3_close(struct net_device *dev)
5829 {
5830         struct tg3 *tp = netdev_priv(dev);
5831
5832         netif_stop_queue(dev);
5833
5834         del_timer_sync(&tp->timer);
5835
5836         spin_lock_irq(&tp->lock);
5837         spin_lock(&tp->tx_lock);
5838 #if 0
5839         tg3_dump_state(tp);
5840 #endif
5841
5842         tg3_disable_ints(tp);
5843
5844         tg3_halt(tp);
5845         tg3_free_rings(tp);
5846         tp->tg3_flags &=
5847                 ~(TG3_FLAG_INIT_COMPLETE |
5848                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5849         netif_carrier_off(tp->dev);
5850
5851         spin_unlock(&tp->tx_lock);
5852         spin_unlock_irq(&tp->lock);
5853
5854         free_irq(dev->irq, dev);
5855
5856         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5857                sizeof(tp->net_stats_prev));
5858         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5859                sizeof(tp->estats_prev));
5860
5861         tg3_free_consistent(tp);
5862
5863         return 0;
5864 }
5865
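/* Fold a tg3 64-bit statistics counter into an unsigned long; on 32-bit
 * hosts only the low 32 bits are returned.
 */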
5866 static inline unsigned long get_stat64(tg3_stat64_t *val)
5867 {
5868         unsigned long ret;
5869
5870 #if (BITS_PER_LONG == 32)
5871         ret = val->low;
5872 #else
5873         ret = ((u64)val->high << 32) | ((u64)val->low);
5874 #endif
5875         return ret;
5876 }
5877
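/* On 5700/5701 with a copper PHY, the frame CRC error count is read from
 * the PHY and accumulated in software; other configurations take it from
 * the hardware statistics block.
 */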
5878 static unsigned long calc_crc_errors(struct tg3 *tp)
5879 {
5880         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5881
5882         if (tp->phy_id != PHY_ID_SERDES &&
5883             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5884              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5885                 unsigned long flags;
5886                 u32 val;
5887
5888                 spin_lock_irqsave(&tp->lock, flags);
5889                 tg3_readphy(tp, 0x1e, &val);
5890                 tg3_writephy(tp, 0x1e, val | 0x8000);
5891                 tg3_readphy(tp, 0x14, &val);
5892                 spin_unlock_irqrestore(&tp->lock, flags);
5893
5894                 tp->phy_crc_errors += val;
5895
5896                 return tp->phy_crc_errors;
5897         }
5898
5899         return get_stat64(&hw_stats->rx_fcs_errors);
5900 }
5901
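/* ethtool statistics are reported as the sum of the counters saved at the
 * last close (estats_prev) and the live hardware counters.
 */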
5902 #define ESTAT_ADD(member) \
5903         estats->member =        old_estats->member + \
5904                                 get_stat64(&hw_stats->member)
5905
5906 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5907 {
5908         struct tg3_ethtool_stats *estats = &tp->estats;
5909         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5910         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5911
5912         if (!hw_stats)
5913                 return old_estats;
5914
5915         ESTAT_ADD(rx_octets);
5916         ESTAT_ADD(rx_fragments);
5917         ESTAT_ADD(rx_ucast_packets);
5918         ESTAT_ADD(rx_mcast_packets);
5919         ESTAT_ADD(rx_bcast_packets);
5920         ESTAT_ADD(rx_fcs_errors);
5921         ESTAT_ADD(rx_align_errors);
5922         ESTAT_ADD(rx_xon_pause_rcvd);
5923         ESTAT_ADD(rx_xoff_pause_rcvd);
5924         ESTAT_ADD(rx_mac_ctrl_rcvd);
5925         ESTAT_ADD(rx_xoff_entered);
5926         ESTAT_ADD(rx_frame_too_long_errors);
5927         ESTAT_ADD(rx_jabbers);
5928         ESTAT_ADD(rx_undersize_packets);
5929         ESTAT_ADD(rx_in_length_errors);
5930         ESTAT_ADD(rx_out_length_errors);
5931         ESTAT_ADD(rx_64_or_less_octet_packets);
5932         ESTAT_ADD(rx_65_to_127_octet_packets);
5933         ESTAT_ADD(rx_128_to_255_octet_packets);
5934         ESTAT_ADD(rx_256_to_511_octet_packets);
5935         ESTAT_ADD(rx_512_to_1023_octet_packets);
5936         ESTAT_ADD(rx_1024_to_1522_octet_packets);
5937         ESTAT_ADD(rx_1523_to_2047_octet_packets);
5938         ESTAT_ADD(rx_2048_to_4095_octet_packets);
5939         ESTAT_ADD(rx_4096_to_8191_octet_packets);
5940         ESTAT_ADD(rx_8192_to_9022_octet_packets);
5941
5942         ESTAT_ADD(tx_octets);
5943         ESTAT_ADD(tx_collisions);
5944         ESTAT_ADD(tx_xon_sent);
5945         ESTAT_ADD(tx_xoff_sent);
5946         ESTAT_ADD(tx_flow_control);
5947         ESTAT_ADD(tx_mac_errors);
5948         ESTAT_ADD(tx_single_collisions);
5949         ESTAT_ADD(tx_mult_collisions);
5950         ESTAT_ADD(tx_deferred);
5951         ESTAT_ADD(tx_excessive_collisions);
5952         ESTAT_ADD(tx_late_collisions);
5953         ESTAT_ADD(tx_collide_2times);
5954         ESTAT_ADD(tx_collide_3times);
5955         ESTAT_ADD(tx_collide_4times);
5956         ESTAT_ADD(tx_collide_5times);
5957         ESTAT_ADD(tx_collide_6times);
5958         ESTAT_ADD(tx_collide_7times);
5959         ESTAT_ADD(tx_collide_8times);
5960         ESTAT_ADD(tx_collide_9times);
5961         ESTAT_ADD(tx_collide_10times);
5962         ESTAT_ADD(tx_collide_11times);
5963         ESTAT_ADD(tx_collide_12times);
5964         ESTAT_ADD(tx_collide_13times);
5965         ESTAT_ADD(tx_collide_14times);
5966         ESTAT_ADD(tx_collide_15times);
5967         ESTAT_ADD(tx_ucast_packets);
5968         ESTAT_ADD(tx_mcast_packets);
5969         ESTAT_ADD(tx_bcast_packets);
5970         ESTAT_ADD(tx_carrier_sense_errors);
5971         ESTAT_ADD(tx_discards);
5972         ESTAT_ADD(tx_errors);
5973
5974         ESTAT_ADD(dma_writeq_full);
5975         ESTAT_ADD(dma_write_prioq_full);
5976         ESTAT_ADD(rxbds_empty);
5977         ESTAT_ADD(rx_discards);
5978         ESTAT_ADD(rx_errors);
5979         ESTAT_ADD(rx_threshold_hit);
5980
5981         ESTAT_ADD(dma_readq_full);
5982         ESTAT_ADD(dma_read_prioq_full);
5983         ESTAT_ADD(tx_comp_queue_full);
5984
5985         ESTAT_ADD(ring_set_send_prod_index);
5986         ESTAT_ADD(ring_status_update);
5987         ESTAT_ADD(nic_irqs);
5988         ESTAT_ADD(nic_avoided_irqs);
5989         ESTAT_ADD(nic_tx_threshold_hit);
5990
5991         return estats;
5992 }
5993
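/* Fill in the generic net_device statistics from the hardware statistics
 * block, adding in the totals saved across the last interface down.
 */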
5994 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
5995 {
5996         struct tg3 *tp = netdev_priv(dev);
5997         struct net_device_stats *stats = &tp->net_stats;
5998         struct net_device_stats *old_stats = &tp->net_stats_prev;
5999         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6000
6001         if (!hw_stats)
6002                 return old_stats;
6003
6004         stats->rx_packets = old_stats->rx_packets +
6005                 get_stat64(&hw_stats->rx_ucast_packets) +
6006                 get_stat64(&hw_stats->rx_mcast_packets) +
6007                 get_stat64(&hw_stats->rx_bcast_packets);
6008                 
6009         stats->tx_packets = old_stats->tx_packets +
6010                 get_stat64(&hw_stats->tx_ucast_packets) +
6011                 get_stat64(&hw_stats->tx_mcast_packets) +
6012                 get_stat64(&hw_stats->tx_bcast_packets);
6013
6014         stats->rx_bytes = old_stats->rx_bytes +
6015                 get_stat64(&hw_stats->rx_octets);
6016         stats->tx_bytes = old_stats->tx_bytes +
6017                 get_stat64(&hw_stats->tx_octets);
6018
6019         stats->rx_errors = old_stats->rx_errors +
6020                 get_stat64(&hw_stats->rx_errors) +
6021                 get_stat64(&hw_stats->rx_discards);
6022         stats->tx_errors = old_stats->tx_errors +
6023                 get_stat64(&hw_stats->tx_errors) +
6024                 get_stat64(&hw_stats->tx_mac_errors) +
6025                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6026                 get_stat64(&hw_stats->tx_discards);
6027
6028         stats->multicast = old_stats->multicast +
6029                 get_stat64(&hw_stats->rx_mcast_packets);
6030         stats->collisions = old_stats->collisions +
6031                 get_stat64(&hw_stats->tx_collisions);
6032
6033         stats->rx_length_errors = old_stats->rx_length_errors +
6034                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6035                 get_stat64(&hw_stats->rx_undersize_packets);
6036
6037         stats->rx_over_errors = old_stats->rx_over_errors +
6038                 get_stat64(&hw_stats->rxbds_empty);
6039         stats->rx_frame_errors = old_stats->rx_frame_errors +
6040                 get_stat64(&hw_stats->rx_align_errors);
6041         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6042                 get_stat64(&hw_stats->tx_discards);
6043         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6044                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6045
6046         stats->rx_crc_errors = old_stats->rx_crc_errors +
6047                 calc_crc_errors(tp);
6048
6049         return stats;
6050 }
6051
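/* Bit-by-bit CRC-32 (reflected polynomial 0xedb88320) over len bytes; used
 * to hash multicast addresses into the MAC hash filter registers.
 */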
6052 static inline u32 calc_crc(unsigned char *buf, int len)
6053 {
6054         u32 reg;
6055         u32 tmp;
6056         int j, k;
6057
6058         reg = 0xffffffff;
6059
6060         for (j = 0; j < len; j++) {
6061                 reg ^= buf[j];
6062
6063                 for (k = 0; k < 8; k++) {
6064                         tmp = reg & 0x01;
6065
6066                         reg >>= 1;
6067
6068                         if (tmp) {
6069                                 reg ^= 0xedb88320;
6070                         }
6071                 }
6072         }
6073
6074         return ~reg;
6075 }
6076
6077 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6078 {
6079         /* accept or reject all multicast frames */
6080         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6081         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6082         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6083         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6084 }
6085
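/* Program the receive mode register and the multicast hash filter from
 * dev->flags and the interface's multicast list.  Caller must hold
 * tp->lock; tg3_set_rx_mode() below acquires it.
 */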
6086 static void __tg3_set_rx_mode(struct net_device *dev)
6087 {
6088         struct tg3 *tp = netdev_priv(dev);
6089         u32 rx_mode;
6090
6091         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6092                                   RX_MODE_KEEP_VLAN_TAG);
6093
6094         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6095          * flag clear.
6096          */
6097 #if TG3_VLAN_TAG_USED
6098         if (!tp->vlgrp &&
6099             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6100                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6101 #else
6102         /* By definition, VLAN is always disabled in this
6103          * case.
6104          */
6105         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6106                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6107 #endif
6108
6109         if (dev->flags & IFF_PROMISC) {
6110                 /* Promiscuous mode. */
6111                 rx_mode |= RX_MODE_PROMISC;
6112         } else if (dev->flags & IFF_ALLMULTI) {
6113                 /* Accept all multicast. */
6114                 tg3_set_multi (tp, 1);
6115         } else if (dev->mc_count < 1) {
6116                 /* Reject all multicast. */
6117                 tg3_set_multi (tp, 0);
6118         } else {
6119                 /* Accept one or more multicast(s). */
6120                 struct dev_mc_list *mclist;
6121                 unsigned int i;
6122                 u32 mc_filter[4] = { 0, };
6123                 u32 regidx;
6124                 u32 bit;
6125                 u32 crc;
6126
6127                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6128                      i++, mclist = mclist->next) {
6129
6130                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6131                         bit = ~crc & 0x7f;
6132                         regidx = (bit & 0x60) >> 5;
6133                         bit &= 0x1f;
6134                         mc_filter[regidx] |= (1 << bit);
6135                 }
6136
6137                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6138                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6139                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6140                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6141         }
6142
6143         if (rx_mode != tp->rx_mode) {
6144                 tp->rx_mode = rx_mode;
6145                 tw32_f(MAC_RX_MODE, rx_mode);
6146                 udelay(10);
6147         }
6148 }
6149
6150 static void tg3_set_rx_mode(struct net_device *dev)
6151 {
6152         struct tg3 *tp = netdev_priv(dev);
6153
6154         spin_lock_irq(&tp->lock);
6155         __tg3_set_rx_mode(dev);
6156         spin_unlock_irq(&tp->lock);
6157 }
6158
6159 #define TG3_REGDUMP_LEN         (32 * 1024)
6160
6161 static int tg3_get_regs_len(struct net_device *dev)
6162 {
6163         return TG3_REGDUMP_LEN;
6164 }
6165
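/* ethtool register dump: snapshot selected register ranges into the
 * caller's TG3_REGDUMP_LEN sized buffer while holding the driver locks.
 */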
6166 static void tg3_get_regs(struct net_device *dev,
6167                 struct ethtool_regs *regs, void *_p)
6168 {
6169         u32 *p = _p;
6170         struct tg3 *tp = netdev_priv(dev);
6171         u8 *orig_p = _p;
6172         int i;
6173
6174         regs->version = 0;
6175
6176         memset(p, 0, TG3_REGDUMP_LEN);
6177
6178         spin_lock_irq(&tp->lock);
6179         spin_lock(&tp->tx_lock);
6180
6181 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6182 #define GET_REG32_LOOP(base,len)                \
6183 do {    p = (u32 *)(orig_p + (base));           \
6184         for (i = 0; i < len; i += 4)            \
6185                 __GET_REG32((base) + i);        \
6186 } while (0)
6187 #define GET_REG32_1(reg)                        \
6188 do {    p = (u32 *)(orig_p + (reg));            \
6189         __GET_REG32((reg));                     \
6190 } while (0)
6191
6192         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6193         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6194         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6195         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6196         GET_REG32_1(SNDDATAC_MODE);
6197         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6198         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6199         GET_REG32_1(SNDBDC_MODE);
6200         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6201         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6202         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6203         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6204         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6205         GET_REG32_1(RCVDCC_MODE);
6206         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6207         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6208         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6209         GET_REG32_1(MBFREE_MODE);
6210         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6211         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6212         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6213         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6214         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6215         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6216         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6217         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6218         GET_REG32_LOOP(FTQ_RESET, 0x120);
6219         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6220         GET_REG32_1(DMAC_MODE);
6221         GET_REG32_LOOP(GRC_MODE, 0x4c);
6222         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6223                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6224
6225 #undef __GET_REG32
6226 #undef GET_REG32_LOOP
6227 #undef GET_REG32_1
6228
6229         spin_unlock(&tp->tx_lock);
6230         spin_unlock_irq(&tp->lock);
6231 }
6232
6233 static int tg3_get_eeprom_len(struct net_device *dev)
6234 {
6235         return EEPROM_CHIP_SIZE;
6236 }
6237
6238 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6239                                                  u32 offset, u32 *val);
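/* ethtool EEPROM read.  NVRAM is accessed one 32-bit word at a time, so
 * unaligned leading and trailing bytes are handled separately from the
 * 4-byte aligned middle of the request.
 */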
6240 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6241 {
6242         struct tg3 *tp = netdev_priv(dev);
6243         int ret;
6244         u8  *pd;
6245         u32 i, offset, len, val, b_offset, b_count;
6246
6247         offset = eeprom->offset;
6248         len = eeprom->len;
6249         eeprom->len = 0;
6250
6251         ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6252         if (ret)
6253                 return ret;
6254         eeprom->magic = swab32(eeprom->magic);
6255
6256         if (offset & 3) {
6257                 /* adjust to start on the required 4-byte boundary */
6258                 b_offset = offset & 3;
6259                 b_count = 4 - b_offset;
6260                 if (b_count > len) {
6261                         /* i.e. offset=1 len=2 */
6262                         b_count = len;
6263                 }
6264                 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6265                 if (ret)
6266                         return ret;
6267                 memcpy(data, ((char*)&val) + b_offset, b_count);
6268                 len -= b_count;
6269                 offset += b_count;
6270                 eeprom->len += b_count;
6271         }
6272
6273         /* read bytes up to the last 4-byte boundary */
6274         pd = &data[eeprom->len];
6275         for (i = 0; i < (len - (len & 3)); i += 4) {
6276                 ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
6277                                 (u32*)(pd + i));
6278                 if (ret) {
6279                         eeprom->len += i;
6280                         return ret;
6281                 }
6282         }
6283         eeprom->len += i;
6284
6285         if (len & 3) {
6286                 /* read the last bytes, which do not end on a 4-byte boundary */
6287                 pd = &data[eeprom->len];
6288                 b_count = len & 3;
6289                 b_offset = offset + len - b_count;
6290                 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6291                 if (ret)
6292                         return ret;
6293                 memcpy(pd, ((char*)&val), b_count);
6294                 eeprom->len += b_count;
6295         }
6296         return 0;
6297 }
6298
6299 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6300 {
6301         struct tg3 *tp = netdev_priv(dev);
6302   
6303         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6304                                         tp->link_config.phy_is_low_power)
6305                 return -EAGAIN;
6306
6307         cmd->supported = (SUPPORTED_Autoneg);
6308
6309         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6310                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6311                                    SUPPORTED_1000baseT_Full);
6312
6313         if (tp->phy_id != PHY_ID_SERDES)
6314                 cmd->supported |= (SUPPORTED_100baseT_Half |
6315                                   SUPPORTED_100baseT_Full |
6316                                   SUPPORTED_10baseT_Half |
6317                                   SUPPORTED_10baseT_Full |
6318                                   SUPPORTED_MII);
6319         else
6320                 cmd->supported |= SUPPORTED_FIBRE;
6321   
6322         cmd->advertising = tp->link_config.advertising;
6323         cmd->speed = tp->link_config.active_speed;
6324         cmd->duplex = tp->link_config.active_duplex;
6325         cmd->port = 0;
6326         cmd->phy_address = PHY_ADDR;
6327         cmd->transceiver = 0;
6328         cmd->autoneg = tp->link_config.autoneg;
6329         cmd->maxtxpkt = 0;
6330         cmd->maxrxpkt = 0;
6331         return 0;
6332 }
6333   
6334 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6335 {
6336         struct tg3 *tp = netdev_priv(dev);
6337   
6338         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6339             tp->link_config.phy_is_low_power)
6340                 return -EAGAIN;
6341
6342         if (tp->phy_id == PHY_ID_SERDES) {
6343                 /* These are the only advertisement bits that may be set.  */
6344                 if (cmd->autoneg == AUTONEG_ENABLE &&
6345                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6346                                           ADVERTISED_1000baseT_Full |
6347                                           ADVERTISED_Autoneg |
6348                                           ADVERTISED_FIBRE)))
6349                         return -EINVAL;
6350         }
6351
6352         spin_lock_irq(&tp->lock);
6353         spin_lock(&tp->tx_lock);
6354
6355         tp->link_config.autoneg = cmd->autoneg;
6356         if (cmd->autoneg == AUTONEG_ENABLE) {
6357                 tp->link_config.advertising = cmd->advertising;
6358                 tp->link_config.speed = SPEED_INVALID;
6359                 tp->link_config.duplex = DUPLEX_INVALID;
6360         } else {
6361                 tp->link_config.advertising = 0;
6362                 tp->link_config.speed = cmd->speed;
6363                 tp->link_config.duplex = cmd->duplex;
6364         }
6365   
6366         tg3_setup_phy(tp, 1);
6367         spin_unlock(&tp->tx_lock);
6368         spin_unlock_irq(&tp->lock);
6369   
6370         return 0;
6371 }
6372   
6373 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6374 {
6375         struct tg3 *tp = netdev_priv(dev);
6376   
6377         strcpy(info->driver, DRV_MODULE_NAME);
6378         strcpy(info->version, DRV_MODULE_VERSION);
6379         strcpy(info->bus_info, pci_name(tp->pdev));
6380 }
6381   
6382 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6383 {
6384         struct tg3 *tp = netdev_priv(dev);
6385   
6386         wol->supported = WAKE_MAGIC;
6387         wol->wolopts = 0;
6388         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6389                 wol->wolopts = WAKE_MAGIC;
6390         memset(&wol->sopass, 0, sizeof(wol->sopass));
6391 }
6392   
6393 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6394 {
6395         struct tg3 *tp = netdev_priv(dev);
6396   
6397         if (wol->wolopts & ~WAKE_MAGIC)
6398                 return -EINVAL;
6399         if ((wol->wolopts & WAKE_MAGIC) &&
6400             tp->phy_id == PHY_ID_SERDES &&
6401             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6402                 return -EINVAL;
6403   
6404         spin_lock_irq(&tp->lock);
6405         if (wol->wolopts & WAKE_MAGIC)
6406                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6407         else
6408                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6409         spin_unlock_irq(&tp->lock);
6410   
6411         return 0;
6412 }
6413   
6414 static u32 tg3_get_msglevel(struct net_device *dev)
6415 {
6416         struct tg3 *tp = netdev_priv(dev);
6417         return tp->msg_enable;
6418 }
6419   
6420 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6421 {
6422         struct tg3 *tp = netdev_priv(dev);
6423         tp->msg_enable = value;
6424 }
6425   
6426 #if TG3_TSO_SUPPORT != 0
6427 static int tg3_set_tso(struct net_device *dev, u32 value)
6428 {
6429         struct tg3 *tp = netdev_priv(dev);
6430
6431         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6432                 if (value)
6433                         return -EINVAL;
6434                 return 0;
6435         }
6436         return ethtool_op_set_tso(dev, value);
6437 }
6438 #endif
6439   
6440 static int tg3_nway_reset(struct net_device *dev)
6441 {
6442         struct tg3 *tp = netdev_priv(dev);
6443         u32 bmcr;
6444         int r;
6445   
6446         spin_lock_irq(&tp->lock);
6447         tg3_readphy(tp, MII_BMCR, &bmcr);
6448         tg3_readphy(tp, MII_BMCR, &bmcr);
6449         r = -EINVAL;
6450         if (bmcr & BMCR_ANENABLE) {
6451                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6452                 r = 0;
6453         }
6454         spin_unlock_irq(&tp->lock);
6455   
6456         return r;
6457 }
6458   
6459 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6460 {
6461         struct tg3 *tp = netdev_priv(dev);
6462   
6463         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6464         ering->rx_mini_max_pending = 0;
6465         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6466
6467         ering->rx_pending = tp->rx_pending;
6468         ering->rx_mini_pending = 0;
6469         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6470         ering->tx_pending = tp->tx_pending;
6471 }
6472   
6473 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6474 {
6475         struct tg3 *tp = netdev_priv(dev);
6476   
6477         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6478             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6479             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6480                 return -EINVAL;
6481   
6482         tg3_netif_stop(tp);
6483         spin_lock_irq(&tp->lock);
6484         spin_lock(&tp->tx_lock);
6485   
6486         tp->rx_pending = ering->rx_pending;
6487
6488         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6489             tp->rx_pending > 63)
6490                 tp->rx_pending = 63;
6491         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6492         tp->tx_pending = ering->tx_pending;
6493
6494         tg3_halt(tp);
6495         tg3_init_hw(tp);
6496         netif_wake_queue(tp->dev);
6497         spin_unlock(&tp->tx_lock);
6498         spin_unlock_irq(&tp->lock);
6499         tg3_netif_start(tp);
6500   
6501         return 0;
6502 }
6503   
6504 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6505 {
6506         struct tg3 *tp = netdev_priv(dev);
6507   
6508         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6509         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6510         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
6511 }
6512   
6513 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6514 {
6515         struct tg3 *tp = netdev_priv(dev);
6516   
6517         tg3_netif_stop(tp);
6518         spin_lock_irq(&tp->lock);
6519         spin_lock(&tp->tx_lock);
6520         if (epause->autoneg)
6521                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6522         else
6523                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6524         if (epause->rx_pause)
6525                 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6526         else
6527                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6528         if (epause->tx_pause)
6529                 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6530         else
6531                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6532         tg3_halt(tp);
6533         tg3_init_hw(tp);
6534         spin_unlock(&tp->tx_lock);
6535         spin_unlock_irq(&tp->lock);
6536         tg3_netif_start(tp);
6537   
6538         return 0;
6539 }
6540   
6541 static u32 tg3_get_rx_csum(struct net_device *dev)
6542 {
6543         struct tg3 *tp = netdev_priv(dev);
6544         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6545 }
6546   
6547 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6548 {
6549         struct tg3 *tp = netdev_priv(dev);
6550   
6551         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6552                 if (data != 0)
6553                         return -EINVAL;
6554                 return 0;
6555         }
6556   
6557         spin_lock_irq(&tp->lock);
6558         if (data)
6559                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6560         else
6561                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6562         spin_unlock_irq(&tp->lock);
6563   
6564         return 0;
6565 }
6566   
6567 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6568 {
6569         struct tg3 *tp = netdev_priv(dev);
6570   
6571         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6572                 if (data != 0)
6573                         return -EINVAL;
6574                 return 0;
6575         }
6576   
6577         if (data)
6578                 dev->features |= NETIF_F_IP_CSUM;
6579         else
6580                 dev->features &= ~NETIF_F_IP_CSUM;
6581
6582         return 0;
6583 }
6584
6585 static int tg3_get_stats_count (struct net_device *dev)
6586 {
6587         return TG3_NUM_STATS;
6588 }
6589
6590 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6591 {
6592         switch (stringset) {
6593         case ETH_SS_STATS:
6594                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6595                 break;
6596         default:
6597                 WARN_ON(1);     /* we need a WARN() */
6598                 break;
6599         }
6600 }
6601
6602 static void tg3_get_ethtool_stats (struct net_device *dev,
6603                                    struct ethtool_stats *estats, u64 *tmp_stats)
6604 {
6605         struct tg3 *tp = dev->priv;
6606         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6607 }
6608
6609 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6610 {
6611         struct mii_ioctl_data *data = if_mii(ifr);
6612         struct tg3 *tp = netdev_priv(dev);
6613         int err;
6614
6615         switch(cmd) {
6616         case SIOCGMIIPHY:
6617                 data->phy_id = PHY_ADDR;
6618
6619                 /* fallthru */
6620         case SIOCGMIIREG: {
6621                 u32 mii_regval;
6622
6623                 if (tp->phy_id == PHY_ID_SERDES)
6624                         break;                  /* We have no PHY */
6625
6626                 spin_lock_irq(&tp->lock);
6627                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6628                 spin_unlock_irq(&tp->lock);
6629
6630                 data->val_out = mii_regval;
6631
6632                 return err;
6633         }
6634
6635         case SIOCSMIIREG:
6636                 if (tp->phy_id == PHY_ID_SERDES)
6637                         break;                  /* We have no PHY */
6638
6639                 if (!capable(CAP_NET_ADMIN))
6640                         return -EPERM;
6641
6642                 spin_lock_irq(&tp->lock);
6643                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6644                 spin_unlock_irq(&tp->lock);
6645
6646                 return err;
6647
6648         default:
6649                 /* do nothing */
6650                 break;
6651         }
6652         return -EOPNOTSUPP;
6653 }
6654
6655 #if TG3_VLAN_TAG_USED
6656 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6657 {
6658         struct tg3 *tp = netdev_priv(dev);
6659
6660         spin_lock_irq(&tp->lock);
6661         spin_lock(&tp->tx_lock);
6662
6663         tp->vlgrp = grp;
6664
6665         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6666         __tg3_set_rx_mode(dev);
6667
6668         spin_unlock(&tp->tx_lock);
6669         spin_unlock_irq(&tp->lock);
6670 }
6671
6672 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6673 {
6674         struct tg3 *tp = netdev_priv(dev);
6675
6676         spin_lock_irq(&tp->lock);
6677         spin_lock(&tp->tx_lock);
6678         if (tp->vlgrp)
6679                 tp->vlgrp->vlan_devices[vid] = NULL;
6680         spin_unlock(&tp->tx_lock);
6681         spin_unlock_irq(&tp->lock);
6682 }
6683 #endif
6684
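/* This table wires the handlers above into the ethtool interface; for
 * example `ethtool -e ethX` ends up in tg3_get_eeprom and
 * `ethtool -G ethX` in tg3_set_ringparam.
 */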
6685 static struct ethtool_ops tg3_ethtool_ops = {
6686         .get_settings           = tg3_get_settings,
6687         .set_settings           = tg3_set_settings,
6688         .get_drvinfo            = tg3_get_drvinfo,
6689         .get_regs_len           = tg3_get_regs_len,
6690         .get_regs               = tg3_get_regs,
6691         .get_wol                = tg3_get_wol,
6692         .set_wol                = tg3_set_wol,
6693         .get_msglevel           = tg3_get_msglevel,
6694         .set_msglevel           = tg3_set_msglevel,
6695         .nway_reset             = tg3_nway_reset,
6696         .get_link               = ethtool_op_get_link,
6697         .get_eeprom_len         = tg3_get_eeprom_len,
6698         .get_eeprom             = tg3_get_eeprom,
6699         .get_ringparam          = tg3_get_ringparam,
6700         .set_ringparam          = tg3_set_ringparam,
6701         .get_pauseparam         = tg3_get_pauseparam,
6702         .set_pauseparam         = tg3_set_pauseparam,
6703         .get_rx_csum            = tg3_get_rx_csum,
6704         .set_rx_csum            = tg3_set_rx_csum,
6705         .get_tx_csum            = ethtool_op_get_tx_csum,
6706         .set_tx_csum            = tg3_set_tx_csum,
6707         .get_sg                 = ethtool_op_get_sg,
6708         .set_sg                 = ethtool_op_set_sg,
6709 #if TG3_TSO_SUPPORT != 0
6710         .get_tso                = ethtool_op_get_tso,
6711         .set_tso                = tg3_set_tso,
6712 #endif
6713         .get_strings            = tg3_get_strings,
6714         .get_stats_count        = tg3_get_stats_count,
6715         .get_ethtool_stats      = tg3_get_ethtool_stats,
6716 };
6717
6718 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6719 static void __devinit tg3_nvram_init(struct tg3 *tp)
6720 {
6721         int j;
6722
6723         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6724                 return;
6725
6726         tw32_f(GRC_EEPROM_ADDR,
6727              (EEPROM_ADDR_FSM_RESET |
6728               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6729                EEPROM_ADDR_CLKPERD_SHIFT)));
6730
6731         /* XXX schedule_timeout() ... */
6732         for (j = 0; j < 100; j++)
6733                 udelay(10);
6734
6735         /* Enable seeprom accesses. */
6736         tw32_f(GRC_LOCAL_CTRL,
6737              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6738         udelay(100);
6739
6740         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6741             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6742                 u32 nvcfg1;
6743
6744                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6745                         u32 nvaccess = tr32(NVRAM_ACCESS);
6746
6747                         tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6748                 }
6749
6750                 nvcfg1 = tr32(NVRAM_CFG1);
6751
6752                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6753                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6754                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6755                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6756                 } else {
6757                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6758                         tw32(NVRAM_CFG1, nvcfg1);
6759                 }
6760
6761                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6762                         u32 nvaccess = tr32(NVRAM_ACCESS);
6763
6764                         tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6765                 }
6766         } else {
6767                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6768         }
6769 }
6770
6771 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6772                                                  u32 offset, u32 *val)
6773 {
6774         u32 tmp;
6775         int i;
6776
6777         if (offset > EEPROM_ADDR_ADDR_MASK ||
6778             (offset % 4) != 0)
6779                 return -EINVAL;
6780
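        /* Program the word address together with the READ and START bits,
         * poll GRC_EEPROM_ADDR until the controller raises COMPLETE, then
         * pick the result up from GRC_EEPROM_DATA.
         */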
6781         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6782                                         EEPROM_ADDR_DEVID_MASK |
6783                                         EEPROM_ADDR_READ);
6784         tw32(GRC_EEPROM_ADDR,
6785              tmp |
6786              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6787              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6788               EEPROM_ADDR_ADDR_MASK) |
6789              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6790
6791         for (i = 0; i < 10000; i++) {
6792                 tmp = tr32(GRC_EEPROM_ADDR);
6793
6794                 if (tmp & EEPROM_ADDR_COMPLETE)
6795                         break;
6796                 udelay(100);
6797         }
6798         if (!(tmp & EEPROM_ADDR_COMPLETE))
6799                 return -EBUSY;
6800
6801         *val = tr32(GRC_EEPROM_DATA);
6802         return 0;
6803 }
6804
6805 static int __devinit tg3_nvram_read(struct tg3 *tp,
6806                                     u32 offset, u32 *val)
6807 {
6808         int i;
6809
6810         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6811                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6812                 return -EINVAL;
6813         }
6814
6815         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6816                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6817
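        /* Buffered flash parts are addressed as (page, byte-within-page)
         * rather than as a flat offset, so split the linear offset into a
         * page number in the page-select bits plus the offset within the
         * page.
         */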
6818         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6819                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6820                           NVRAM_BUFFERED_PAGE_POS) +
6821                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6822
6823         if (offset > NVRAM_ADDR_MSK)
6824                 return -EINVAL;
6825
6826         tg3_nvram_lock(tp);
6827
6828         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6829                 u32 nvaccess = tr32(NVRAM_ACCESS);
6830
6831                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6832         }
6833
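        /* Issue a single-word read: load the address, set RD|GO (with
         * FIRST|LAST marking a one-word burst), then poll NVRAM_CMD for
         * completion below.
         */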
6834         tw32(NVRAM_ADDR, offset);
6835         tw32(NVRAM_CMD,
6836              NVRAM_CMD_RD | NVRAM_CMD_GO |
6837              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6838
6839         /* Wait for the done bit to be set. */
6840         for (i = 0; i < 1000; i++) {
6841                 udelay(10);
6842                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6843                         udelay(10);
6844                         *val = swab32(tr32(NVRAM_RDDATA));
6845                         break;
6846                 }
6847         }
6848
6849         tg3_nvram_unlock(tp);
6850
6851         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6852                 u32 nvaccess = tr32(NVRAM_ACCESS);
6853
6854                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6855         }
6856
6857         if (i >= 1000)
6858                 return -EBUSY;
6859
6860         return 0;
6861 }
6862
6863 struct subsys_tbl_ent {
6864         u16 subsys_vendor, subsys_devid;
6865         u32 phy_id;
6866 };
6867
6868 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6869         /* Broadcom boards. */
6870         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6871         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6872         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6873         { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
6874         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6875         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6876         { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
6877         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6878         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6879         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6880         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6881
6882         /* 3com boards. */
6883         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6884         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6885         { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
6886         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6887         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6888
6889         /* DELL boards. */
6890         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6891         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6892         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6893         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6894
6895         /* Compaq boards. */
6896         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6897         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6898         { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
6899         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6900         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6901
6902         /* IBM boards. */
6903         { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
6904 };
6905
6906 static int __devinit tg3_phy_probe(struct tg3 *tp)
6907 {
6908         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6909         u32 hw_phy_id, hw_phy_id_masked;
6910         u32 val;
6911         int i, eeprom_signature_found, err;
6912
6913         tp->phy_id = PHY_ID_INVALID;
6914         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6915                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6916                      tp->pdev->subsystem_vendor) &&
6917                     (subsys_id_to_phy_id[i].subsys_devid ==
6918                      tp->pdev->subsystem_device)) {
6919                         tp->phy_id = subsys_id_to_phy_id[i].phy_id;
6920                         break;
6921                 }
6922         }
6923
6924         eeprom_phy_id = PHY_ID_INVALID;
6925         eeprom_signature_found = 0;
6926         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6927         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6928                 u32 nic_cfg, led_cfg;
6929
6930                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6931                 tp->nic_sram_data_cfg = nic_cfg;
6932
6933                 eeprom_signature_found = 1;
6934
6935                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
6936                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
6937                         eeprom_phy_id = PHY_ID_SERDES;
6938                 } else {
6939                         u32 nic_phy_id;
6940
6941                         tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
6942                         if (nic_phy_id != 0) {
6943                                 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
6944                                 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
6945
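                                /* Repack the two 16-bit ID words into the
                                 * driver's internal PHY_ID layout (the same
                                 * packing used for the MII PHYSID1/PHYSID2
                                 * reads further down) so the result can be
                                 * checked with KNOWN_PHY_ID().
                                 */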
6946                                 eeprom_phy_id  = (id1 >> 16) << 10;
6947                                 eeprom_phy_id |= (id2 & 0xfc00) << 16;
6948                                 eeprom_phy_id |= (id2 & 0x03ff) <<  0;
6949                         }
6950                 }
6951
6952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6953                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
6954                         led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
6955                                     SHASTA_EXT_LED_MODE_MASK);
6956                 } else
6957                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
6958
6959                 switch (led_cfg) {
6960                 default:
6961                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
6962                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
6963                         break;
6964
6965                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
6966                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
6967                         break;
6968
6969                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
6970                         tp->led_ctrl = LED_CTRL_MODE_MAC;
6971                         break;
6972
6973                 case SHASTA_EXT_LED_SHARED:
6974                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
6975                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6976                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
6977                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
6978                                                  LED_CTRL_MODE_PHY_2);
6979                         break;
6980
6981                 case SHASTA_EXT_LED_MAC:
6982                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
6983                         break;
6984
6985                 case SHASTA_EXT_LED_COMBO:
6986                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
6987                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
6988                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
6989                                                  LED_CTRL_MODE_PHY_2);
6990                         break;
6991
6992                 }
6993
6994                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6995                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
6996                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
6997                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
6998
6999                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7000                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7001                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7002                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7003                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7004
7005                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7006                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7007                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7008                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7009                 }
7010                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7011                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7012         }
7013
7014         /* Reading the PHY ID register can conflict with ASF
7015          * firmware access to the PHY hardware.
7016          */
7017         err = 0;
7018         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7019                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7020         } else {
7021                 /* Now read the physical PHY_ID from the chip and verify
7022                  * that it is sane.  If it doesn't look good, we fall back
7023                  * to the hard-coded, table-based PHY_ID and, failing
7024                  * that, to the value found in the eeprom area.
7025                  */
7026                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7027                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7028
7029                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7030                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7031                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7032
7033                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7034         }
7035
7036         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7037                 tp->phy_id = hw_phy_id;
7038         } else {
7039                 /* phy_id currently holds the value found in the
7040                  * subsys_id_to_phy_id[] table or PHY_ID_INVALID
7041                  * if a match was not found there.
7042                  */
7043                 if (tp->phy_id == PHY_ID_INVALID) {
7044                         if (!eeprom_signature_found ||
7045                             !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
7046                                 return -ENODEV;
7047                         tp->phy_id = eeprom_phy_id;
7048                 }
7049         }
7050
7051         if (tp->phy_id != PHY_ID_SERDES &&
7052             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7053                 u32 bmsr, adv_reg, tg3_ctrl;
7054
7055                 tg3_readphy(tp, MII_BMSR, &bmsr);
7056                 tg3_readphy(tp, MII_BMSR, &bmsr);
7057
7058                 if (bmsr & BMSR_LSTATUS)
7059                         goto skip_phy_reset;
7060                     
7061                 err = tg3_phy_reset(tp);
7062                 if (err)
7063                         return err;
7064
7065                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7066                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7067                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7068                 tg3_ctrl = 0;
7069                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7070                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7071                                     MII_TG3_CTRL_ADV_1000_FULL);
7072                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7073                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7074                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7075                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7076                 }
7077
7078                 if (!tg3_copper_is_advertising_all(tp)) {
7079                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7080
7081                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7082                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7083
7084                         tg3_writephy(tp, MII_BMCR,
7085                                      BMCR_ANENABLE | BMCR_ANRESTART);
7086                 }
7087                 tg3_phy_set_wirespeed(tp);
7088
7089                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7090                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7091                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7092         }
7093
7094 skip_phy_reset:
7095         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7096                 err = tg3_init_5401phy_dsp(tp);
7097                 if (err)
7098                         return err;
7099         }
7100
7101         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7102                 err = tg3_init_5401phy_dsp(tp);
7103         }
7104
7105         if (!eeprom_signature_found)
7106                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7107
7108         if (tp->phy_id == PHY_ID_SERDES)
7109                 tp->link_config.advertising =
7110                         (ADVERTISED_1000baseT_Half |
7111                          ADVERTISED_1000baseT_Full |
7112                          ADVERTISED_Autoneg |
7113                          ADVERTISED_FIBRE);
7114         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7115                 tp->link_config.advertising &=
7116                         ~(ADVERTISED_1000baseT_Half |
7117                           ADVERTISED_1000baseT_Full);
7118
7119         return err;
7120 }
7121
7122 static void __devinit tg3_read_partno(struct tg3 *tp)
7123 {
7124         unsigned char vpd_data[256];
7125         int i;
7126
7127         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
7128                 /* Sun decided not to put the necessary bits in the
7129                  * NVRAM of their onboard tg3 parts :(
7130                  */
7131                 strcpy(tp->board_part_number, "Sun 5704");
7132                 return;
7133         }
7134
7135         for (i = 0; i < 256; i += 4) {
7136                 u32 tmp;
7137
7138                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7139                         goto out_not_found;
7140
7141                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7142                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7143                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7144                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7145         }
7146
7147         /* Now parse and find the part number. */
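        /* VPD layout: tag 0x82 starts the identifier string and 0x90/0x91
         * the read-only/read-write sections, each followed by a 16-bit
         * little-endian length.  Within the read-only section every field
         * is a two-character keyword ('PN' for the part number), a 1-byte
         * length and the data.
         */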
7148         for (i = 0; i < 256; ) {
7149                 unsigned char val = vpd_data[i];
7150                 int block_end;
7151
7152                 if (val == 0x82 || val == 0x91) {
7153                         i = (i + 3 +
7154                              (vpd_data[i + 1] +
7155                               (vpd_data[i + 2] << 8)));
7156                         continue;
7157                 }
7158
7159                 if (val != 0x90)
7160                         goto out_not_found;
7161
7162                 block_end = (i + 3 +
7163                              (vpd_data[i + 1] +
7164                               (vpd_data[i + 2] << 8)));
7165                 i += 3;
7166                 while (i < block_end) {
7167                         if (vpd_data[i + 0] == 'P' &&
7168                             vpd_data[i + 1] == 'N') {
7169                                 int partno_len = vpd_data[i + 2];
7170
7171                                 if (partno_len > 24)
7172                                         goto out_not_found;
7173
7174                                 memcpy(tp->board_part_number,
7175                                        &vpd_data[i + 3],
7176                                        partno_len);
7177
7178                                 /* Success. */
7179                                 return;
7180                 }

                /* Not the 'PN' keyword: skip it (2-byte keyword, 1-byte
                 * length, then the data) so the scan advances instead of
                 * looping forever on the same field.
                 */
                i += 3 + vpd_data[i + 2];
7181         }
7182
7183                 /* Part number not found. */
7184                 goto out_not_found;
7185         }
7186
7187 out_not_found:
7188         strcpy(tp->board_part_number, "none");
7189 }
7190
7191 #ifdef CONFIG_SPARC64
7192 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
7193 {
7194         struct pci_dev *pdev = tp->pdev;
7195         struct pcidev_cookie *pcp = pdev->sysdata;
7196
7197         if (pcp != NULL) {
7198                 int node = pcp->prom_node;
7199                 u32 venid, devid;
7200                 int err;
7201
7202                 err = prom_getproperty(node, "subsystem-vendor-id",
7203                                        (char *) &venid, sizeof(venid));
7204                 if (err == 0 || err == -1)
7205                         return 0;
7206                 err = prom_getproperty(node, "subsystem-id",
7207                                        (char *) &devid, sizeof(devid));
7208                 if (err == 0 || err == -1)
7209                         return 0;
7210
7211                 if (venid == PCI_VENDOR_ID_SUN &&
7212                     devid == PCI_DEVICE_ID_TIGON3_5704)
7213                         return 1;
7214         }
7215         return 0;
7216 }
7217 #endif
7218
7219 static int __devinit tg3_get_invariants(struct tg3 *tp)
7220 {
7221         u32 misc_ctrl_reg;
7222         u32 cacheline_sz_reg;
7223         u32 pci_state_reg, grc_misc_cfg;
7224         u32 val;
7225         u16 pci_cmd;
7226         int err;
7227
7228 #ifdef CONFIG_SPARC64
7229         if (tg3_is_sun_5704(tp))
7230                 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
7231 #endif
7232
7233         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7234          * reordering to the mailbox registers done by the host
7235          * controller can cause major troubles.  We read back from
7236          * every mailbox register write to force the writes to be
7237          * posted to the chip in order.
7238          */
7239         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7240                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7241             pci_find_device(PCI_VENDOR_ID_INTEL,
7242                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7243             pci_find_device(PCI_VENDOR_ID_INTEL,
7244                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7245             pci_find_device(PCI_VENDOR_ID_INTEL,
7246                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7247             pci_find_device(PCI_VENDOR_ID_AMD,
7248                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7249                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7250
7251         /* Force memory write invalidate off.  If we leave it on,
7252          * then on 5700_BX chips we have to enable a workaround.
7253          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7254          * to match the cacheline size.  The Broadcom driver has this
7255          * workaround but turns MWI off all the time, so it never uses
7256          * it.  This seems to suggest that the workaround is insufficient.
7257          */
7258         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7259         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7260         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7261
7262         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7263          * has the register indirect write enable bit set before
7264          * we try to access any of the MMIO registers.  It is also
7265          * critical that the PCI-X hw workaround situation is decided
7266          * before that as well.
7267          */
7268         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7269                               &misc_ctrl_reg);
7270
7271         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7272                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7273
7274         /* Initialize misc host control in PCI block. */
7275         tp->misc_host_ctrl |= (misc_ctrl_reg &
7276                                MISC_HOST_CTRL_CHIPREV);
7277         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7278                                tp->misc_host_ctrl);
7279
7280         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7281                               &cacheline_sz_reg);
7282
7283         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7284         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7285         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7286         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7287
7288         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7289                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7290
7291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7292             tp->pci_lat_timer < 64) {
7293                 tp->pci_lat_timer = 64;
7294
7295                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7296                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7297                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7298                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7299
7300                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7301                                        cacheline_sz_reg);
7302         }
7303
7304         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7305                               &pci_state_reg);
7306
7307         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7308                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7309
7310                 /* If this is a 5700 BX chipset, and we are in PCI-X
7311                  * mode, enable register write workaround.
7312                  *
7313                  * The workaround is to use indirect register accesses
7314                  * for all chip writes not to mailbox registers.
7315                  */
7316                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7317                         u32 pm_reg;
7318                         u16 pci_cmd;
7319
7320                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7321
7322                         /* The chip can have its power management PCI config
7323                          * space registers clobbered due to this bug.
7324                          * So explicitly force the chip into D0 here.
7325                          */
7326                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7327                                               &pm_reg);
7328                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7329                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7330                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7331                                                pm_reg);
7332
7333                         /* Also, force SERR#/PERR# in PCI command. */
7334                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7335                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7336                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7337                 }
7338         }
7339
7340         /* Back to back register writes can cause problems on this chip,
7341          * the workaround is to read back all reg writes except those to
7342          * mailbox regs.  See tg3_write_indirect_reg32().
7343          *
7344          * PCI Express 5750_A0 rev chips need this workaround too.
7345          */
7346         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7347             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7348              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7349                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7350
7351         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7352                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7353         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7354                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7355
7356         /* Chip-specific fixup from Broadcom driver */
7357         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7358             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7359                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7360                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7361         }
7362
7363         /* Force the chip into D0. */
7364         err = tg3_set_power_state(tp, 0);
7365         if (err) {
7366                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7367                        pci_name(tp->pdev));
7368                 return err;
7369         }
7370
7371         /* 5700 B0 chips do not support checksumming correctly due
7372          * to hardware bugs.
7373          */
7374         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7375                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7376
7377         /* Pseudo-header checksum is done by hardware logic and not
7378          * the offload processors, so make the chip do the pseudo-
7379          * header checksums on receive.  For transmit it is more
7380          * convenient to do the pseudo-header checksum in software
7381          * as Linux does that on transmit for us in all cases.
7382          */
7383         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7384         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7385
7386         /* Derive initial jumbo mode from MTU assigned in
7387          * ether_setup() via the alloc_etherdev() call
7388          */
7389         if (tp->dev->mtu > ETH_DATA_LEN)
7390                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7391
7392         /* Determine WakeOnLan speed to use. */
7393         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7394             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7395             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7396             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7397                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7398         } else {
7399                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7400         }
7401
7402         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
7403         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7404             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7405              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7406              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7407                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7408
7409         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7410             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7411                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7412         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7413                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7414
7415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7416             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7417                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7418
7419         /* Only 5701 and later support tagged irq status mode.
7420          * Also, 5788 chips cannot use tagged irq status.
7421          *
7422          * However, since we are using NAPI, avoid tagged irq status
7423          * because the interrupt condition is more difficult to
7424          * fully clear in that mode.
7425          */
7426         tp->coalesce_mode = 0;
7427
7428         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7429             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7430                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7431
7432         /* Initialize MAC MI mode, polling disabled. */
7433         tw32_f(MAC_MI_MODE, tp->mi_mode);
7434         udelay(80);
7435
7436         /* Initialize data/descriptor byte/word swapping. */
7437         val = tr32(GRC_MODE);
7438         val &= GRC_MODE_HOST_STACKUP;
7439         tw32(GRC_MODE, val | tp->grc_mode);
7440
7441         tg3_switch_clocks(tp);
7442
7443         /* Clear this out for sanity. */
7444         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7445
7446         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7447                               &pci_state_reg);
7448         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7449             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7450                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7451
7452                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7453                     chiprevid == CHIPREV_ID_5701_B0 ||
7454                     chiprevid == CHIPREV_ID_5701_B2 ||
7455                     chiprevid == CHIPREV_ID_5701_B5) {
7456                         unsigned long sram_base;
7457
7458                         /* Write some dummy words into the SRAM status block
7459                          * area, see if it reads back correctly.  If the return
7460                          * value is bad, force enable the PCIX workaround.
7461                          */
7462                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7463
7464                         writel(0x00000000, sram_base);
7465                         writel(0x00000000, sram_base + 4);
7466                         writel(0xffffffff, sram_base + 4);
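                        /* If the 0xffffffff write to the adjacent word
                         * corrupted the first word, target-mode writes are
                         * unreliable and the PCI-X workaround is forced on.
                         */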
7467                         if (readl(sram_base) != 0x00000000)
7468                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7469                 }
7470         }
7471
7472         udelay(50);
7473         tg3_nvram_init(tp);
7474
7475         /* Always use host TXDs; they perform better, particularly
7476          * with multi-frag packets.  The tests below are kept here
7477          * as documentation should we change this decision again
7478          * in the future.
7479          */
7480         tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7481
7482 #if 0
7483         /* Determine if TX descriptors will reside in
7484          * main memory or in the chip SRAM.
7485          */
7486         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
7487             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7488             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7489                 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7490 #endif
7491
7492         grc_misc_cfg = tr32(GRC_MISC_CFG);
7493         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7494
7495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7496             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7497                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7498                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7499         }
7500
7501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7502             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7503              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7504                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7505
7506         /* these are limited to 10/100 only */
7507         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7508              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7509             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7510              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7511              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7512               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7513               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
7514             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7515              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
7516                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7517
7518         err = tg3_phy_probe(tp);
7519         if (err) {
7520                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7521                        pci_name(tp->pdev), err);
7522                 /* ... but do not return immediately ... */
7523         }
7524
7525         tg3_read_partno(tp);
7526
7527         if (tp->phy_id == PHY_ID_SERDES) {
7528                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7529         } else {
7530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7531                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7532                 else
7533                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7534         }
7535
7536         /* 5700 {AX,BX} chips have a broken status block link
7537          * change bit implementation, so we must use the
7538          * status register in those cases.
7539          */
7540         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7541                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7542         else
7543                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7544
7545         /* The led_ctrl is set during tg3_phy_probe; here we might
7546          * have to force the link status polling mechanism based
7547          * upon subsystem IDs.
7548          */
7549         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7550             tp->phy_id != PHY_ID_SERDES) {
7551                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7552                                   TG3_FLAG_USE_LINKCHG_REG);
7553         }
7554
7555         /* For all SERDES we poll the MAC status register. */
7556         if (tp->phy_id == PHY_ID_SERDES)
7557                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7558         else
7559                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7560
7561         /* 5700 BX chips need to have their TX producer index mailboxes
7562          * written twice to workaround a bug.
7563          */
7564         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7565                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7566         else
7567                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7568
7569         /* It seems all chips can get confused if TX buffers
7570          * straddle the 4GB address boundary in some cases.
7571          */
7572         tp->dev->hard_start_xmit = tg3_start_xmit;
7573
7574         tp->rx_offset = 2;
7575         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7576             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7577                 tp->rx_offset = 0;
7578
7579         /* By default, disable wake-on-lan.  User can change this
7580          * using ETHTOOL_SWOL.
7581          */
7582         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7583
7584         return err;
7585 }
7586
7587 #ifdef CONFIG_SPARC64
7588 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7589 {
7590         struct net_device *dev = tp->dev;
7591         struct pci_dev *pdev = tp->pdev;
7592         struct pcidev_cookie *pcp = pdev->sysdata;
7593
7594         if (pcp != NULL) {
7595                 int node = pcp->prom_node;
7596
7597                 if (prom_getproplen(node, "local-mac-address") == 6) {
7598                         prom_getproperty(node, "local-mac-address",
7599                                          dev->dev_addr, 6);
7600                         return 0;
7601                 }
7602         }
7603         return -ENODEV;
7604 }
7605
7606 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7607 {
7608         struct net_device *dev = tp->dev;
7609
7610         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7611         return 0;
7612 }
7613 #endif
7614
7615 static int __devinit tg3_get_device_address(struct tg3 *tp)
7616 {
7617         struct net_device *dev = tp->dev;
7618         u32 hi, lo, mac_offset;
7619
7620 #ifdef CONFIG_SPARC64
7621         if (!tg3_get_macaddr_sparc(tp))
7622                 return 0;
7623 #endif
7624
7625         mac_offset = 0x7c;
7626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7627             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
7628                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7629                         mac_offset = 0xcc;
7630                 if (tg3_nvram_lock(tp))
7631                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7632                 else
7633                         tg3_nvram_unlock(tp);
7634         }
7635
7636         /* First try to get it from MAC address mailbox. */
7637         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
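        /* 0x484b is ASCII "HK"; the bootcode apparently stores it in the
         * high mailbox word as a "MAC address present" signature.
         */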
7638         if ((hi >> 16) == 0x484b) {
7639                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7640                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7641
7642                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7643                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7644                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7645                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7646                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7647         }
7648         /* Next, try NVRAM. */
7649         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704) &&
7650                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7651                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7652                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7653                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7654                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7655                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7656                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7657                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7658         }
7659         /* Finally just fetch it out of the MAC control regs. */
7660         else {
7661                 hi = tr32(MAC_ADDR_0_HIGH);
7662                 lo = tr32(MAC_ADDR_0_LOW);
7663
7664                 dev->dev_addr[5] = lo & 0xff;
7665                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7666                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7667                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7668                 dev->dev_addr[1] = hi & 0xff;
7669                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7670         }
7671
7672         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7673 #ifdef CONFIG_SPARC64
7674                 if (!tg3_get_default_macaddr_sparc(tp))
7675                         return 0;
7676 #endif
7677                 return -EINVAL;
7678         }
7679         return 0;
7680 }
7681
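/* Editor's note: a host<->NIC DMA sanity check.  One internal buffer
 * descriptor is written into NIC SRAM through the PCI memory-window config
 * registers, the read-DMA (host to device) or write-DMA (device to host)
 * FIFO is kicked, and the matching completion FIFO is then polled for up to
 * roughly 4ms (40 x 100us) for the descriptor to complete.
 */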
7682 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7683 {
7684         struct tg3_internal_buffer_desc test_desc;
7685         u32 sram_dma_descs;
7686         int i, ret;
7687
7688         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7689
7690         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7691         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7692         tw32(RDMAC_STATUS, 0);
7693         tw32(WDMAC_STATUS, 0);
7694
7695         tw32(BUFMGR_MODE, 0);
7696         tw32(FTQ_RESET, 0);
7697
7698         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7699         test_desc.addr_lo = buf_dma & 0xffffffff;
7700         test_desc.nic_mbuf = 0x00002100;
7701         test_desc.len = size;
7702
7703         /*
7704          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7705          * the *second* time the tg3 driver was getting loaded after an
7706          * initial scan.
7707          *
7708          * Broadcom tells me:
7709          *   ...the DMA engine is connected to the GRC block and a DMA
7710          *   reset may affect the GRC block in some unpredictable way...
7711          *   The behavior of resets to individual blocks has not been tested.
7712          *
7713          * Broadcom noted the GRC reset will also reset all sub-components.
7714          */
7715         if (to_device) {
7716                 test_desc.cqid_sqid = (13 << 8) | 2;
7717
7718                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7719                 udelay(40);
7720         } else {
7721                 test_desc.cqid_sqid = (16 << 8) | 7;
7722
7723                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7724                 udelay(40);
7725         }
7726         test_desc.flags = 0x00000005;
7727
7728         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7729                 u32 val;
7730
7731                 val = *(((u32 *)&test_desc) + i);
7732                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7733                                        sram_dma_descs + (i * sizeof(u32)));
7734                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7735         }
7736         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7737
7738         if (to_device) {
7739                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7740         } else {
7741                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7742         }
7743
7744         ret = -ENODEV;
7745         for (i = 0; i < 40; i++) {
7746                 u32 val;
7747
7748                 if (to_device)
7749                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7750                 else
7751                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7752                 if ((val & 0xffff) == sram_dma_descs) {
7753                         ret = 0;
7754                         break;
7755                 }
7756
7757                 udelay(100);
7758         }
7759
7760         return ret;
7761 }
7762
7763 #define TEST_BUFFER_SIZE        0x400
7764
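/* Editor's note: probes for a DMA write-boundary setting the host bridge can
 * live with.  A 1KB coherent buffer is filled with an incrementing pattern,
 * DMA'd to NIC SRAM and back, and verified; on 5700/5701 parts a mismatch
 * while the write boundary is still disabled drops the boundary to 16 bytes
 * and retries, while any other mismatch fails the probe with -ENODEV.
 */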
7765 static int __devinit tg3_test_dma(struct tg3 *tp)
7766 {
7767         dma_addr_t buf_dma;
7768         u32 *buf;
7769         int ret;
7770
7771         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7772         if (!buf) {
7773                 ret = -ENOMEM;
7774                 goto out_nofree;
7775         }
7776
7777         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7778                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7779
7780 #ifndef CONFIG_X86
7781         {
7782                 u8 byte;
7783                 int cacheline_size;
7784                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7785
7786                 if (byte == 0)
7787                         cacheline_size = 1024;
7788                 else
7789                         cacheline_size = (int) byte * 4;
7790
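                /* Editor's note: PCI_CACHE_LINE_SIZE is in units of 32-bit
                 * words, hence the *4 above (e.g. a register value of 0x10
                 * means a 64-byte line); a value of 0 is treated as a
                 * 1024-byte line.  The switch below picks a matching DMA
                 * write boundary, with separate encodings for plain PCI,
                 * PCI-X and PCI Express.
                 */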
7791                 switch (cacheline_size) {
7792                 case 16:
7793                 case 32:
7794                 case 64:
7795                 case 128:
7796                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7797                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7798                                 tp->dma_rwctrl |=
7799                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7800                                 break;
7801                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7802                                 tp->dma_rwctrl &=
7803                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
7804                                 tp->dma_rwctrl |=
7805                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7806                                 break;
7807                         }
7808                         /* fallthrough */
7809                 case 256:
7810                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7811                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7812                                 tp->dma_rwctrl |=
7813                                         DMA_RWCTRL_WRITE_BNDRY_256;
7814                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7815                                 tp->dma_rwctrl |=
7816                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7817                 }
7818         }
7819 #endif
7820
7821         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7822                 tp->dma_rwctrl |= 0x001f0000;
7823         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7824                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7825                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7826                         tp->dma_rwctrl |= 0x003f0000;
7827                 else
7828                         tp->dma_rwctrl |= 0x003f000f;
7829         } else {
7830                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7831                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7832                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7833
7834                         if (ccval == 0x6 || ccval == 0x7)
7835                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7836
7837                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
7838                         tp->dma_rwctrl |= 0x009f0000;
7839                 } else {
7840                         tp->dma_rwctrl |= 0x001b000f;
7841                 }
7842         }
7843
7844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7845             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7846                 tp->dma_rwctrl &= 0xfffffff0;
7847
7848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7850                 /* Remove this if it causes problems for some boards. */
7851                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7852
7853                 /* On 5700/5701 chips, we need to set this bit.
7854                  * Otherwise the chip will issue cacheline transactions
7855                  * to streamable DMA memory without all of the byte
7856                  * enables asserted.  This is an error on several
7857                  * RISC PCI controllers, in particular sparc64.
7858                  *
7859                  * On 5703/5704 chips, this bit has been reassigned
7860                  * a different meaning.  In particular, it is used
7861                  * on those chips to enable a PCI-X workaround.
7862                  */
7863                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7864         }
7865
7866         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7867
7868 #if 0
7869         /* Unneeded, already done by tg3_get_invariants.  */
7870         tg3_switch_clocks(tp);
7871 #endif
7872
7873         ret = 0;
7874         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7875             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7876                 goto out;
7877
7878         while (1) {
7879                 u32 *p = buf, i;
7880
7881                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7882                         p[i] = i;
7883
7884                 /* Send the buffer to the chip. */
7885                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7886                 if (ret) {
7887                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7888                         break;
7889                 }
7890
7891 #if 0
7892                 /* validate data reached card RAM correctly. */
7893                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7894                         u32 val;
7895                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7896                         if (le32_to_cpu(val) != p[i]) {
7897                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
7898                                 /* ret = -ENODEV here? */
7899                         }
7900                         p[i] = 0;
7901                 }
7902 #endif
7903                 /* Now read it back. */
7904                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7905                 if (ret) {
7906                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
7907
7908                         break;
7909                 }
7910
7911                 /* Verify it. */
7912                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7913                         if (p[i] == i)
7914                                 continue;
7915
7916                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7917                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7918                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7919                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7920                                 break;
7921                         } else {
7922                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
7923                                 ret = -ENODEV;
7924                                 goto out;
7925                         }
7926                 }
7927
7928                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
7929                         /* Success. */
7930                         ret = 0;
7931                         break;
7932                 }
7933         }
7934
7935 out:
7936         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
7937 out_nofree:
7938         return ret;
7939 }
7940
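/* Editor's note: link defaults - advertise every 10/100/1000 half/full mode,
 * leave speed and duplex unforced in favour of autonegotiation, and start
 * with the carrier marked off until link-up is actually detected.
 */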
7941 static void __devinit tg3_init_link_config(struct tg3 *tp)
7942 {
7943         tp->link_config.advertising =
7944                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
7945                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
7946                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
7947                  ADVERTISED_Autoneg | ADVERTISED_MII);
7948         tp->link_config.speed = SPEED_INVALID;
7949         tp->link_config.duplex = DUPLEX_INVALID;
7950         tp->link_config.autoneg = AUTONEG_ENABLE;
7951         netif_carrier_off(tp->dev);
7952         tp->link_config.active_speed = SPEED_INVALID;
7953         tp->link_config.active_duplex = DUPLEX_INVALID;
7954         tp->link_config.phy_is_low_power = 0;
7955         tp->link_config.orig_speed = SPEED_INVALID;
7956         tp->link_config.orig_duplex = DUPLEX_INVALID;
7957         tp->link_config.orig_autoneg = AUTONEG_INVALID;
7958 }
7959
7960 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
7961 {
7962         tp->bufmgr_config.mbuf_read_dma_low_water =
7963                 DEFAULT_MB_RDMA_LOW_WATER;
7964         tp->bufmgr_config.mbuf_mac_rx_low_water =
7965                 DEFAULT_MB_MACRX_LOW_WATER;
7966         tp->bufmgr_config.mbuf_high_water =
7967                 DEFAULT_MB_HIGH_WATER;
7968
7969         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
7970                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
7971         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
7972                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
7973         tp->bufmgr_config.mbuf_high_water_jumbo =
7974                 DEFAULT_MB_HIGH_WATER_JUMBO;
7975
7976         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
7977         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
7978 }
7979
7980 static char * __devinit tg3_phy_string(struct tg3 *tp)
7981 {
7982         switch (tp->phy_id & PHY_ID_MASK) {
7983         case PHY_ID_BCM5400:    return "5400";
7984         case PHY_ID_BCM5401:    return "5401";
7985         case PHY_ID_BCM5411:    return "5411";
7986         case PHY_ID_BCM5701:    return "5701";
7987         case PHY_ID_BCM5703:    return "5703";
7988         case PHY_ID_BCM5704:    return "5704";
7989         case PHY_ID_BCM5705:    return "5705";
7990         case PHY_ID_BCM5750:    return "5750";
7991         case PHY_ID_BCM8002:    return "8002";
7992         case PHY_ID_SERDES:     return "serdes";
7993         default:                return "unknown";
7994         }
7995 }
7996
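/* Editor's note: the 5704 is a dual-port device, so its mate is simply the
 * other PCI function in the same slot.  devfn & ~7 masks off PCI_FUNC()
 * (equivalent to PCI_DEVFN(PCI_SLOT(devfn), 0)), and functions 0-7 are then
 * scanned for a device that isn't ourselves.
 */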
7997 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
7998 {
7999         struct pci_dev *peer;
8000         unsigned int func, devnr = tp->pdev->devfn & ~7;
8001
8002         for (func = 0; func < 8; func++) {
8003                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8004                 if (peer && peer != tp->pdev)
8005                         break;
8006                 pci_dev_put(peer);
8007         }
8008         if (!peer || peer == tp->pdev)
8009                 BUG();
8010
8011         /*
8012          * We don't need to keep the refcount elevated; there's no way
8013          * to remove one half of this device without removing the other.
8014          */
8015         pci_dev_put(peer);
8016
8017         return peer;
8018 }
8019
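/* Editor's note: PCI probe path - enable the device, claim and map BAR 0,
 * try a 64-bit DMA mask with a 32-bit fallback, read the chip invariants and
 * MAC address, run the DMA engine test, then register the net_device and
 * snapshot the PCI config space for later chip resets and resume.
 */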
8020 static int __devinit tg3_init_one(struct pci_dev *pdev,
8021                                   const struct pci_device_id *ent)
8022 {
8023         static int tg3_version_printed = 0;
8024         unsigned long tg3reg_base, tg3reg_len;
8025         struct net_device *dev;
8026         struct tg3 *tp;
8027         int i, err, pci_using_dac, pm_cap;
8028
8029         if (tg3_version_printed++ == 0)
8030                 printk(KERN_INFO "%s", version);
8031
8032         err = pci_enable_device(pdev);
8033         if (err) {
8034                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8035                        "aborting.\n");
8036                 return err;
8037         }
8038
8039         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8040                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8041                        "base address, aborting.\n");
8042                 err = -ENODEV;
8043                 goto err_out_disable_pdev;
8044         }
8045
8046         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8047         if (err) {
8048                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8049                        "aborting.\n");
8050                 goto err_out_disable_pdev;
8051         }
8052
8053         pci_set_master(pdev);
8054
8055         /* Find power-management capability. */
8056         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8057         if (pm_cap == 0) {
8058                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8059                        "aborting.\n");
8060                 err = -EIO;
8061                 goto err_out_free_res;
8062         }
8063
8064         /* Configure DMA attributes. */
8065         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8066         if (!err) {
8067                 pci_using_dac = 1;
8068                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8069                 if (err < 0) {
8070                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8071                                "for consistent allocations\n");
8072                         goto err_out_free_res;
8073                 }
8074         } else {
8075                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8076                 if (err) {
8077                         printk(KERN_ERR PFX "No usable DMA configuration, "
8078                                "aborting.\n");
8079                         goto err_out_free_res;
8080                 }
8081                 pci_using_dac = 0;
8082         }
8083
8084         tg3reg_base = pci_resource_start(pdev, 0);
8085         tg3reg_len = pci_resource_len(pdev, 0);
8086
8087         dev = alloc_etherdev(sizeof(*tp));
8088         if (!dev) {
8089                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8090                 err = -ENOMEM;
8091                 goto err_out_free_res;
8092         }
8093
8094         SET_MODULE_OWNER(dev);
8095         SET_NETDEV_DEV(dev, &pdev->dev);
8096
8097         if (pci_using_dac)
8098                 dev->features |= NETIF_F_HIGHDMA;
8099 #if TG3_VLAN_TAG_USED
8100         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8101         dev->vlan_rx_register = tg3_vlan_rx_register;
8102         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8103 #endif
8104
8105         tp = netdev_priv(dev);
8106         tp->pdev = pdev;
8107         tp->dev = dev;
8108         tp->pm_cap = pm_cap;
8109         tp->mac_mode = TG3_DEF_MAC_MODE;
8110         tp->rx_mode = TG3_DEF_RX_MODE;
8111         tp->tx_mode = TG3_DEF_TX_MODE;
8112         tp->mi_mode = MAC_MI_MODE_BASE;
8113         if (tg3_debug > 0)
8114                 tp->msg_enable = tg3_debug;
8115         else
8116                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8117
8118         /* The word/byte swap controls here control register access byte
8119          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8120          * setting below.
8121          */
8122         tp->misc_host_ctrl =
8123                 MISC_HOST_CTRL_MASK_PCI_INT |
8124                 MISC_HOST_CTRL_WORD_SWAP |
8125                 MISC_HOST_CTRL_INDIR_ACCESS |
8126                 MISC_HOST_CTRL_PCISTATE_RW;
8127
8128         /* The NONFRM (non-frame) byte/word swap controls take effect
8129          * on descriptor entries, anything which isn't packet data.
8130          *
8131          * The StrongARM chips on the board (one for tx, one for rx)
8132          * are running in big-endian mode.
8133          */
8134         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8135                         GRC_MODE_WSWAP_NONFRM_DATA);
8136 #ifdef __BIG_ENDIAN
8137         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8138 #endif
8139         spin_lock_init(&tp->lock);
8140         spin_lock_init(&tp->tx_lock);
8141         spin_lock_init(&tp->indirect_lock);
8142         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8143
8144         tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
8145         if (tp->regs == 0UL) {
8146                 printk(KERN_ERR PFX "Cannot map device registers, "
8147                        "aborting.\n");
8148                 err = -ENOMEM;
8149                 goto err_out_free_dev;
8150         }
8151
8152         tg3_init_link_config(tp);
8153
8154         tg3_init_bufmgr_config(tp);
8155
8156         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8157         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8158         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8159
8160         dev->open = tg3_open;
8161         dev->stop = tg3_close;
8162         dev->get_stats = tg3_get_stats;
8163         dev->set_multicast_list = tg3_set_rx_mode;
8164         dev->set_mac_address = tg3_set_mac_addr;
8165         dev->do_ioctl = tg3_ioctl;
8166         dev->tx_timeout = tg3_tx_timeout;
8167         dev->poll = tg3_poll;
8168         dev->ethtool_ops = &tg3_ethtool_ops;
8169         dev->weight = 64;
8170         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8171         dev->change_mtu = tg3_change_mtu;
8172         dev->irq = pdev->irq;
8173 #ifdef CONFIG_NET_POLL_CONTROLLER
8174         dev->poll_controller = tg3_poll_controller;
8175 #endif
8176
8177         err = tg3_get_invariants(tp);
8178         if (err) {
8179                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8180                        "aborting.\n");
8181                 goto err_out_iounmap;
8182         }
8183
8184         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8185             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8186                 tp->bufmgr_config.mbuf_read_dma_low_water =
8187                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8188                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8189                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8190                 tp->bufmgr_config.mbuf_high_water =
8191                         DEFAULT_MB_HIGH_WATER_5705;
8192         }
8193
8194 #if TG3_TSO_SUPPORT != 0
8195         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8196             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8197             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8198             ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8199              GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8200                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8201         } else {
8202                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8203         }
8204
8205         /* TSO is off by default, user can enable using ethtool.  */
8206 #if 0
8207         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8208                 dev->features |= NETIF_F_TSO;
8209 #endif
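        /* Editor's note: when built with TSO support and the chip is capable,
         * this is normally toggled from userspace with something like
         * "ethtool -K ethX tso on" ("ethX" is a placeholder interface name).
         */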
8210
8211 #endif
8212
8213         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8214             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8215             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8216                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8217                 tp->rx_pending = 63;
8218         }
8219
8220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8221                 tp->pdev_peer = tg3_find_5704_peer(tp);
8222
8223         err = tg3_get_device_address(tp);
8224         if (err) {
8225                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8226                        "aborting.\n");
8227                 goto err_out_iounmap;
8228         }
8229
8230         /*
8231          * Reset the chip in case a UNDI or EFI driver did not shut it
8232          * down cleanly.  The DMA self test will enable WDMAC and we'll
8233          * see (spurious) pending DMA on the PCI bus at that point.
8234          */
8235         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8236             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8237                 pci_save_state(tp->pdev, tp->pci_cfg_state);
8238                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8239                 tg3_halt(tp);
8240         }
8241
8242         err = tg3_test_dma(tp);
8243         if (err) {
8244                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8245                 goto err_out_iounmap;
8246         }
8247
8248         /* Tigon3 can do IPv4 checksum offload only... and some chips
8249          * have buggy checksumming.
8250          */
8251         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8252                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8253                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8254         } else
8255                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8256
8257         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8258                 dev->features &= ~NETIF_F_HIGHDMA;
8259
8260         err = register_netdev(dev);
8261         if (err) {
8262                 printk(KERN_ERR PFX "Cannot register net device, "
8263                        "aborting.\n");
8264                 goto err_out_iounmap;
8265         }
8266
8267         pci_set_drvdata(pdev, dev);
8268
8269         /* Now that we have fully set up the chip, save away a snapshot
8270          * of the PCI config space.  We need to restore this after
8271          * GRC_MISC_CFG core clock resets and some resume events.
8272          */
8273         pci_save_state(tp->pdev, tp->pci_cfg_state);
8274
8275         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8276                dev->name,
8277                tp->board_part_number,
8278                tp->pci_chip_rev_id,
8279                tg3_phy_string(tp),
8280                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8281                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8282                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8283                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8284                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8285                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8286
8287         for (i = 0; i < 6; i++)
8288                 printk("%2.2x%c", dev->dev_addr[i],
8289                        i == 5 ? '\n' : ':');
8290
8291         printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
8292                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8293                "TSOcap[%d] \n",
8294                dev->name,
8295                (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
8296                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8297                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8298                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8299                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8300                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8301                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8302                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8303
8304         return 0;
8305
8306 err_out_iounmap:
8307         iounmap((void *) tp->regs);
8308
8309 err_out_free_dev:
8310         free_netdev(dev);
8311
8312 err_out_free_res:
8313         pci_release_regions(pdev);
8314
8315 err_out_disable_pdev:
8316         pci_disable_device(pdev);
8317         pci_set_drvdata(pdev, NULL);
8318         return err;
8319 }
8320
8321 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8322 {
8323         struct net_device *dev = pci_get_drvdata(pdev);
8324
8325         if (dev) {
8326                 struct tg3 *tp = netdev_priv(dev);
8327
8328                 unregister_netdev(dev);
8329                 iounmap((void *)tp->regs);
8330                 free_netdev(dev);
8331                 pci_release_regions(pdev);
8332                 pci_disable_device(pdev);
8333                 pci_set_drvdata(pdev, NULL);
8334         }
8335 }
8336
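/* Editor's note: suspend stops the transmit path and receive polling, kills
 * the timer, disables interrupts, detaches the interface and halts the chip,
 * then enters the requested PCI power state; if that last step fails, the
 * hardware is re-initialised and the interface reattached so it stays usable.
 */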
8337 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8338 {
8339         struct net_device *dev = pci_get_drvdata(pdev);
8340         struct tg3 *tp = netdev_priv(dev);
8341         int err;
8342
8343         if (!netif_running(dev))
8344                 return 0;
8345
8346         tg3_netif_stop(tp);
8347
8348         del_timer_sync(&tp->timer);
8349
8350         spin_lock_irq(&tp->lock);
8351         spin_lock(&tp->tx_lock);
8352         tg3_disable_ints(tp);
8353         spin_unlock(&tp->tx_lock);
8354         spin_unlock_irq(&tp->lock);
8355
8356         netif_device_detach(dev);
8357
8358         spin_lock_irq(&tp->lock);
8359         spin_lock(&tp->tx_lock);
8360         tg3_halt(tp);
8361         spin_unlock(&tp->tx_lock);
8362         spin_unlock_irq(&tp->lock);
8363
8364         err = tg3_set_power_state(tp, state);
8365         if (err) {
8366                 spin_lock_irq(&tp->lock);
8367                 spin_lock(&tp->tx_lock);
8368
8369                 tg3_init_hw(tp);
8370
8371                 tp->timer.expires = jiffies + tp->timer_offset;
8372                 add_timer(&tp->timer);
8373
8374                 spin_unlock(&tp->tx_lock);
8375                 spin_unlock_irq(&tp->lock);
8376
8377                 netif_device_attach(dev);
8378                 tg3_netif_start(tp);
8379         }
8380
8381         return err;
8382 }
8383
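/* Editor's note: resume restores the saved PCI config space, brings the chip
 * back to full power, reattaches the interface, re-runs the hardware init,
 * restarts the timer and re-enables interrupts before waking the queues.
 */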
8384 static int tg3_resume(struct pci_dev *pdev)
8385 {
8386         struct net_device *dev = pci_get_drvdata(pdev);
8387         struct tg3 *tp = netdev_priv(dev);
8388         int err;
8389
8390         if (!netif_running(dev))
8391                 return 0;
8392
8393         pci_restore_state(tp->pdev, tp->pci_cfg_state);
8394
8395         err = tg3_set_power_state(tp, 0);
8396         if (err)
8397                 return err;
8398
8399         netif_device_attach(dev);
8400
8401         spin_lock_irq(&tp->lock);
8402         spin_lock(&tp->tx_lock);
8403
8404         tg3_init_hw(tp);
8405
8406         tp->timer.expires = jiffies + tp->timer_offset;
8407         add_timer(&tp->timer);
8408
8409         tg3_enable_ints(tp);
8410
8411         spin_unlock(&tp->tx_lock);
8412         spin_unlock_irq(&tp->lock);
8413
8414         tg3_netif_start(tp);
8415
8416         return 0;
8417 }
8418
8419 static struct pci_driver tg3_driver = {
8420         .name           = DRV_MODULE_NAME,
8421         .id_table       = tg3_pci_tbl,
8422         .probe          = tg3_init_one,
8423         .remove         = __devexit_p(tg3_remove_one),
8424         .suspend        = tg3_suspend,
8425         .resume         = tg3_resume
8426 };
8427
8428 static int __init tg3_init(void)
8429 {
8430         return pci_module_init(&tg3_driver);
8431 }
8432
8433 static void __exit tg3_cleanup(void)
8434 {
8435         pci_unregister_driver(&tg3_driver);
8436 }
8437
8438 module_init(tg3_init);
8439 module_exit(tg3_cleanup);