1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  *
8  * Firmware is:
9  *      Copyright (C) 2000-2003 Broadcom Corporation.
10  */
11
12 #include <linux/config.h>
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mii.h>
29 #include <linux/if_vlan.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/workqueue.h>
33
34 #include <net/checksum.h>
35
36 #include <asm/system.h>
37 #include <asm/io.h>
38 #include <asm/byteorder.h>
39 #include <asm/uaccess.h>
40
41 #ifdef CONFIG_SPARC64
42 #include <asm/idprom.h>
43 #include <asm/oplib.h>
44 #include <asm/pbm.h>
45 #endif
46
47 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48 #define TG3_VLAN_TAG_USED 1
49 #else
50 #define TG3_VLAN_TAG_USED 0
51 #endif
52
53 #ifdef NETIF_F_TSO
54 #define TG3_TSO_SUPPORT 1
55 #else
56 #define TG3_TSO_SUPPORT 0
57 #endif
58
59 #include "tg3.h"
60
61 #define DRV_MODULE_NAME         "tg3"
62 #define PFX DRV_MODULE_NAME     ": "
63 #define DRV_MODULE_VERSION      "3.23"
64 #define DRV_MODULE_RELDATE      "February 15, 2005"
65
66 #define TG3_DEF_MAC_MODE        0
67 #define TG3_DEF_RX_MODE         0
68 #define TG3_DEF_TX_MODE         0
69 #define TG3_DEF_MSG_ENABLE        \
70         (NETIF_MSG_DRV          | \
71          NETIF_MSG_PROBE        | \
72          NETIF_MSG_LINK         | \
73          NETIF_MSG_TIMER        | \
74          NETIF_MSG_IFDOWN       | \
75          NETIF_MSG_IFUP         | \
76          NETIF_MSG_RX_ERR       | \
77          NETIF_MSG_TX_ERR)
78
79 /* length of time before we decide the hardware is borked,
80  * and dev->tx_timeout() should be called to fix the problem
81  */
82 #define TG3_TX_TIMEOUT                  (5 * HZ)
83
84 /* hardware minimum and maximum for a single frame's data payload */
85 #define TG3_MIN_MTU                     60
86 #define TG3_MAX_MTU(tp) \
87         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
88           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
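/* The *_RING_SIZE values above are dictated by the hardware/firmware; the
 * *_PENDING values are only driver defaults for how many of those
 * descriptors are kept posted with buffers (normally adjustable at run time
 * through the ethtool ring parameters).
 */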
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
107           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
108          512 : 1024)
109
110 #define TG3_TX_RING_SIZE                512
111 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
112
113 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_RING_SIZE)
115 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116                                  TG3_RX_JUMBO_RING_SIZE)
117 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
118                                    TG3_RX_RCB_RING_SIZE(tp))
119 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
120                                  TG3_TX_RING_SIZE)
121 #define TX_RING_GAP(TP) \
122         (TG3_TX_RING_SIZE - (TP)->tx_pending)
123 #define TX_BUFFS_AVAIL(TP)                                              \
124         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
125           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
126           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
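/* Example of the mask trick described above: TG3_TX_RING_SIZE is a power of
 * two, so NEXT_TX(511) == (512 & 511) == 0 and the index wraps with a simple
 * AND instead of a hardware modulo.  TX_BUFFS_AVAIL() likewise reports how
 * many of the tp->tx_pending usable descriptors are currently free.
 */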
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 static char version[] __devinitdata =
139         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
140
141 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
142 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
143 MODULE_LICENSE("GPL");
144 MODULE_VERSION(DRV_MODULE_VERSION);
145
146 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
147 module_param(tg3_debug, int, 0);
148 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
149
150 static struct pci_device_id tg3_pci_tbl[] = {
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
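/* Exporting the PCI ID table lets the hotplug machinery load this module
 * automatically when a matching Broadcom/SysKonnect/Altima/Apple device is
 * discovered.
 */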
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
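/* Basic register write helper.  Chips with the PCI-X target hardware bug are
 * written indirectly through config space (TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA) under indirect_lock; everything else is a plain MMIO
 * write, with an extra read-back on parts that have the 5701 register write
 * bug.
 */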
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
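/* Shorthand accessors used throughout the driver: tw32()/tr32() and friends
 * write and read chip registers, tw32_f() additionally flushes the posted
 * MMIO write with a read-back, and the *_mbox variants apply the mailbox
 * write-reordering and TXD mailbox double-write workarounds defined above.
 */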
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
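/* Indirect access to the NIC's on-chip SRAM: the memory window base address
 * is programmed through PCI config space, TG3PCI_MEM_WIN_DATA is then read
 * or written, and the window is returned to zero, all under indirect_lock.
 */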
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
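/* Interrupt masking: writing 1 to the interrupt mailbox masks the device
 * interrupt and writing 0 unmasks it, while MISC_HOST_CTRL_MASK_PCI_INT
 * gates the PCI interrupt line itself.  tg3_enable_ints() also forces a
 * self-interrupt via tg3_cond_int() if a status block update is already
 * pending, so no event is lost while interrupts were off.
 */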
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which reenables interrupts
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
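/* Appears to step the core clock over to ALTCLK (or select the 625 core
 * clock on 5705/5750-class parts) and then back, preserving only the CLKRUN
 * bits and caching the result in tp->pci_clock_ctrl; the reason for the
 * intermediate writes is not documented here.
 */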
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
473                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
474                         tw32_f(TG3PCI_CLOCK_CTRL,
475                                clock_ctrl | CLOCK_CTRL_625_CORE);
476                         udelay(40);
477                 }
478         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
479                 tw32_f(TG3PCI_CLOCK_CTRL,
480                      clock_ctrl |
481                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
482                 udelay(40);
483                 tw32_f(TG3PCI_CLOCK_CTRL,
484                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
485                 udelay(40);
486         }
487         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
488         udelay(40);
489 }
490
491 #define PHY_BUSY_LOOPS  5000
492
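/* MII (MDIO) access to the PHY: a command frame holding the PHY address,
 * register number and read/write opcode is written to MAC_MI_COM, then the
 * BUSY bit is polled for up to PHY_BUSY_LOOPS iterations.  Hardware
 * auto-polling is temporarily switched off around the transaction.
 */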
493 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
494 {
495         u32 frame_val;
496         unsigned int loops;
497         int ret;
498
499         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
500                 tw32_f(MAC_MI_MODE,
501                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
502                 udelay(80);
503         }
504
505         *val = 0x0;
506
507         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
508                       MI_COM_PHY_ADDR_MASK);
509         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
510                       MI_COM_REG_ADDR_MASK);
511         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
512         
513         tw32_f(MAC_MI_COM, frame_val);
514
515         loops = PHY_BUSY_LOOPS;
516         while (loops != 0) {
517                 udelay(10);
518                 frame_val = tr32(MAC_MI_COM);
519
520                 if ((frame_val & MI_COM_BUSY) == 0) {
521                         udelay(5);
522                         frame_val = tr32(MAC_MI_COM);
523                         break;
524                 }
525                 loops -= 1;
526         }
527
528         ret = -EBUSY;
529         if (loops != 0) {
530                 *val = frame_val & MI_COM_DATA_MASK;
531                 ret = 0;
532         }
533
534         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
535                 tw32_f(MAC_MI_MODE, tp->mi_mode);
536                 udelay(80);
537         }
538
539         return ret;
540 }
541
542 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
543 {
544         u32 frame_val;
545         unsigned int loops;
546         int ret;
547
548         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
549                 tw32_f(MAC_MI_MODE,
550                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
551                 udelay(80);
552         }
553
554         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
555                       MI_COM_PHY_ADDR_MASK);
556         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
557                       MI_COM_REG_ADDR_MASK);
558         frame_val |= (val & MI_COM_DATA_MASK);
559         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
560         
561         tw32_f(MAC_MI_COM, frame_val);
562
563         loops = PHY_BUSY_LOOPS;
564         while (loops != 0) {
565                 udelay(10);
566                 frame_val = tr32(MAC_MI_COM);
567                 if ((frame_val & MI_COM_BUSY) == 0) {
568                         udelay(5);
569                         frame_val = tr32(MAC_MI_COM);
570                         break;
571                 }
572                 loops -= 1;
573         }
574
575         ret = -EBUSY;
576         if (loops != 0)
577                 ret = 0;
578
579         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
580                 tw32_f(MAC_MI_MODE, tp->mi_mode);
581                 udelay(80);
582         }
583
584         return ret;
585 }
586
587 static void tg3_phy_set_wirespeed(struct tg3 *tp)
588 {
589         u32 val;
590
591         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
592                 return;
593
594         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
595             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
596                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
597                              (val | (1 << 15) | (1 << 4)));
598 }
599
600 static int tg3_bmcr_reset(struct tg3 *tp)
601 {
602         u32 phy_control;
603         int limit, err;
604
605         /* OK, reset it, and poll the BMCR_RESET bit until it
606          * clears or we time out.
607          */
608         phy_control = BMCR_RESET;
609         err = tg3_writephy(tp, MII_BMCR, phy_control);
610         if (err != 0)
611                 return -EBUSY;
612
613         limit = 5000;
614         while (limit--) {
615                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
616                 if (err != 0)
617                         return -EBUSY;
618
619                 if ((phy_control & BMCR_RESET) == 0) {
620                         udelay(40);
621                         break;
622                 }
623                 udelay(10);
624         }
625         if (limit <= 0)
626                 return -EBUSY;
627
628         return 0;
629 }
630
631 static int tg3_wait_macro_done(struct tg3 *tp)
632 {
633         int limit = 100;
634
635         while (limit--) {
636                 u32 tmp32;
637
638                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
639                         if ((tmp32 & 0x1000) == 0)
640                                 break;
641                 }
642         }
643         if (limit <= 0)
644                 return -EBUSY;
645
646         return 0;
647 }
648
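/* Part of the 5703/5704/5705 PHY workaround below: writes a known test
 * pattern into each of the four DSP channels and reads it back, returning
 * -EBUSY (and, on a macro timeout, requesting another PHY reset through
 * *resetp) when the pattern does not come back intact.
 */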
649 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
650 {
651         static const u32 test_pat[4][6] = {
652         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
653         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
654         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
655         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
656         };
657         int chan;
658
659         for (chan = 0; chan < 4; chan++) {
660                 int i;
661
662                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
663                              (chan * 0x2000) | 0x0200);
664                 tg3_writephy(tp, 0x16, 0x0002);
665
666                 for (i = 0; i < 6; i++)
667                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
668                                      test_pat[chan][i]);
669
670                 tg3_writephy(tp, 0x16, 0x0202);
671                 if (tg3_wait_macro_done(tp)) {
672                         *resetp = 1;
673                         return -EBUSY;
674                 }
675
676                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
677                              (chan * 0x2000) | 0x0200);
678                 tg3_writephy(tp, 0x16, 0x0082);
679                 if (tg3_wait_macro_done(tp)) {
680                         *resetp = 1;
681                         return -EBUSY;
682                 }
683
684                 tg3_writephy(tp, 0x16, 0x0802);
685                 if (tg3_wait_macro_done(tp)) {
686                         *resetp = 1;
687                         return -EBUSY;
688                 }
689
690                 for (i = 0; i < 6; i += 2) {
691                         u32 low, high;
692
693                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
694                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
695                             tg3_wait_macro_done(tp)) {
696                                 *resetp = 1;
697                                 return -EBUSY;
698                         }
699                         low &= 0x7fff;
700                         high &= 0x000f;
701                         if (low != test_pat[chan][i] ||
702                             high != test_pat[chan][i+1]) {
703                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
705                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
706
707                                 return -EBUSY;
708                         }
709                 }
710         }
711
712         return 0;
713 }
714
715 static int tg3_phy_reset_chanpat(struct tg3 *tp)
716 {
717         int chan;
718
719         for (chan = 0; chan < 4; chan++) {
720                 int i;
721
722                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
723                              (chan * 0x2000) | 0x0200);
724                 tg3_writephy(tp, 0x16, 0x0002);
725                 for (i = 0; i < 6; i++)
726                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
727                 tg3_writephy(tp, 0x16, 0x0202);
728                 if (tg3_wait_macro_done(tp))
729                         return -EBUSY;
730         }
731
732         return 0;
733 }
734
735 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
736 {
737         u32 reg32, phy9_orig;
738         int retries, do_phy_reset, err;
739
740         retries = 10;
741         do_phy_reset = 1;
742         do {
743                 if (do_phy_reset) {
744                         err = tg3_bmcr_reset(tp);
745                         if (err)
746                                 return err;
747                         do_phy_reset = 0;
748                 }
749
750                 /* Disable transmitter and interrupt.  */
751                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
752                         continue;
753
754                 reg32 |= 0x3000;
755                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
756
757                 /* Set full-duplex, 1000 mbps.  */
758                 tg3_writephy(tp, MII_BMCR,
759                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
760
761                 /* Set to master mode.  */
762                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
763                         continue;
764
765                 tg3_writephy(tp, MII_TG3_CTRL,
766                              (MII_TG3_CTRL_AS_MASTER |
767                               MII_TG3_CTRL_ENABLE_AS_MASTER));
768
769                 /* Enable SM_DSP_CLOCK and 6dB.  */
770                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
771
772                 /* Block the PHY control access.  */
773                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
774                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
775
776                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
777                 if (!err)
778                         break;
779         } while (--retries);
780
781         err = tg3_phy_reset_chanpat(tp);
782         if (err)
783                 return err;
784
785         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
786         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
787
788         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
789         tg3_writephy(tp, 0x16, 0x0000);
790
791         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
793                 /* Set Extended packet length bit for jumbo frames */
794                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
795         }
796         else {
797                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
798         }
799
800         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
801
802         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
803                 reg32 &= ~0x3000;
804                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
805         } else if (!err)
806                 err = -EBUSY;
807
808         return err;
809 }
810
811 /* Reset the tigon3 PHY and apply the chip- and PHY-specific
812  * workarounds that must follow a PHY reset.
813  */
814 static int tg3_phy_reset(struct tg3 *tp)
815 {
816         u32 phy_status;
817         int err;
818
819         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
820         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
821         if (err != 0)
822                 return -EBUSY;
823
824         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
826             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
827                 err = tg3_phy_reset_5703_4_5(tp);
828                 if (err)
829                         return err;
830                 goto out;
831         }
832
833         err = tg3_bmcr_reset(tp);
834         if (err)
835                 return err;
836
837 out:
838         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
839                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
840                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
841                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
842                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
843                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
844                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
845         }
846         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848                 tg3_writephy(tp, 0x1c, 0x8d68);
849         }
850         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
851                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
852                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
853                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
854                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
855                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
856                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
857                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
858                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
859         }
860         /* Set Extended packet length bit (bit 14) on all chips that
861          * support jumbo frames */
862         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
863                 /* Cannot do read-modify-write on 5401 */
864                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
865         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
866                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
867                 u32 phy_reg;
868
869                 /* Set bit 14 with read-modify-write to preserve other bits */
870                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
871                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
872                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
873         }
874         tg3_phy_set_wirespeed(tp);
875         return 0;
876 }
877
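/* Presumably drives the GRC GPIO pins that control auxiliary (Vaux) power:
 * if WOL is enabled on this port or on its 5704 peer the GPIOs are left
 * supplying aux power, otherwise they are sequenced off; the exact pin
 * meaning is board dependent.
 */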
878 static void tg3_frob_aux_power(struct tg3 *tp)
879 {
880         struct tg3 *tp_peer = tp;
881
882         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
883                 return;
884
885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
886                 tp_peer = pci_get_drvdata(tp->pdev_peer);
887                 if (!tp_peer)
888                         BUG();
889         }
890
891
892         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
893             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
894                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
895                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
896                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
897                              (GRC_LCLCTRL_GPIO_OE0 |
898                               GRC_LCLCTRL_GPIO_OE1 |
899                               GRC_LCLCTRL_GPIO_OE2 |
900                               GRC_LCLCTRL_GPIO_OUTPUT0 |
901                               GRC_LCLCTRL_GPIO_OUTPUT1));
902                         udelay(100);
903                 } else {
904                         u32 no_gpio2;
905                         u32 grc_local_ctrl;
906
907                         if (tp_peer != tp &&
908                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
909                                 return;
910
911                         /* On 5753 and variants, GPIO2 cannot be used. */
912                         no_gpio2 = tp->nic_sram_data_cfg &
913                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
914
915                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
916                                          GRC_LCLCTRL_GPIO_OE1 |
917                                          GRC_LCLCTRL_GPIO_OE2 |
918                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
919                                          GRC_LCLCTRL_GPIO_OUTPUT2;
920                         if (no_gpio2) {
921                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
922                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
923                         }
924                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
925                                                 grc_local_ctrl);
926                         udelay(100);
927
928                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
929
930                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
931                                                 grc_local_ctrl);
932                         udelay(100);
933
934                         if (!no_gpio2) {
935                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
936                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
937                                        grc_local_ctrl);
938                                 udelay(100);
939                         }
940                 }
941         } else {
942                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
943                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
944                         if (tp_peer != tp &&
945                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
946                                 return;
947
948                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
949                              (GRC_LCLCTRL_GPIO_OE1 |
950                               GRC_LCLCTRL_GPIO_OUTPUT1));
951                         udelay(100);
952
953                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
954                              (GRC_LCLCTRL_GPIO_OE1));
955                         udelay(100);
956
957                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958                              (GRC_LCLCTRL_GPIO_OE1 |
959                               GRC_LCLCTRL_GPIO_OUTPUT1));
960                         udelay(100);
961                 }
962         }
963 }
964
965 static int tg3_setup_phy(struct tg3 *, int);
966
967 #define RESET_KIND_SHUTDOWN     0
968 #define RESET_KIND_INIT         1
969 #define RESET_KIND_SUSPEND      2
970
971 static void tg3_write_sig_post_reset(struct tg3 *, int);
972
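/* Transition the chip between PCI power states: state 0 (D0) is restored
 * immediately, states 1-3 map to D1-D3.  On the way down a copper PHY is
 * dropped to 10Mb half duplex, magic-packet WOL is armed if enabled, core
 * clocks are slowed where the chip allows it, and aux power is set up via
 * tg3_frob_aux_power() before the new PM control word is written.
 */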
973 static int tg3_set_power_state(struct tg3 *tp, int state)
974 {
975         u32 misc_host_ctrl;
976         u16 power_control, power_caps;
977         int pm = tp->pm_cap;
978
979         /* Make sure register accesses (indirect or otherwise)
980          * will function correctly.
981          */
982         pci_write_config_dword(tp->pdev,
983                                TG3PCI_MISC_HOST_CTRL,
984                                tp->misc_host_ctrl);
985
986         pci_read_config_word(tp->pdev,
987                              pm + PCI_PM_CTRL,
988                              &power_control);
989         power_control |= PCI_PM_CTRL_PME_STATUS;
990         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
991         switch (state) {
992         case 0:
993                 power_control |= 0;
994                 pci_write_config_word(tp->pdev,
995                                       pm + PCI_PM_CTRL,
996                                       power_control);
997                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
998                 udelay(100);
999
1000                 return 0;
1001
1002         case 1:
1003                 power_control |= 1;
1004                 break;
1005
1006         case 2:
1007                 power_control |= 2;
1008                 break;
1009
1010         case 3:
1011                 power_control |= 3;
1012                 break;
1013
1014         default:
1015                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1016                        "requested.\n",
1017                        tp->dev->name, state);
1018                 return -EINVAL;
1019         };
1020
1021         power_control |= PCI_PM_CTRL_PME_ENABLE;
1022
1023         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1024         tw32(TG3PCI_MISC_HOST_CTRL,
1025              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1026
1027         if (tp->link_config.phy_is_low_power == 0) {
1028                 tp->link_config.phy_is_low_power = 1;
1029                 tp->link_config.orig_speed = tp->link_config.speed;
1030                 tp->link_config.orig_duplex = tp->link_config.duplex;
1031                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1032         }
1033
1034         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1035                 tp->link_config.speed = SPEED_10;
1036                 tp->link_config.duplex = DUPLEX_HALF;
1037                 tp->link_config.autoneg = AUTONEG_ENABLE;
1038                 tg3_setup_phy(tp, 0);
1039         }
1040
1041         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1042
1043         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1044                 u32 mac_mode;
1045
1046                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1047                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1048                         udelay(40);
1049
1050                         mac_mode = MAC_MODE_PORT_MODE_MII;
1051
1052                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1053                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1054                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1055                 } else {
1056                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1057                 }
1058
1059                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1060                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1061
1062                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1063                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1064                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1065
1066                 tw32_f(MAC_MODE, mac_mode);
1067                 udelay(100);
1068
1069                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1070                 udelay(10);
1071         }
1072
1073         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1074             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1075              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1076                 u32 base_val;
1077
1078                 base_val = tp->pci_clock_ctrl;
1079                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1080                              CLOCK_CTRL_TXCLK_DISABLE);
1081
1082                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1083                      CLOCK_CTRL_ALTCLK |
1084                      CLOCK_CTRL_PWRDOWN_PLL133);
1085                 udelay(40);
1086         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1087                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1088                 u32 newbits1, newbits2;
1089
1090                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1092                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1093                                     CLOCK_CTRL_TXCLK_DISABLE |
1094                                     CLOCK_CTRL_ALTCLK);
1095                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1096                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1097                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1098                         newbits1 = CLOCK_CTRL_625_CORE;
1099                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1100                 } else {
1101                         newbits1 = CLOCK_CTRL_ALTCLK;
1102                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1103                 }
1104
1105                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1106                 udelay(40);
1107
1108                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1109                 udelay(40);
1110
1111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1112                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1113                         u32 newbits3;
1114
1115                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1116                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1117                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1118                                             CLOCK_CTRL_TXCLK_DISABLE |
1119                                             CLOCK_CTRL_44MHZ_CORE);
1120                         } else {
1121                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1122                         }
1123
1124                         tw32_f(TG3PCI_CLOCK_CTRL,
1125                                          tp->pci_clock_ctrl | newbits3);
1126                         udelay(40);
1127                 }
1128         }
1129
1130         tg3_frob_aux_power(tp);
1131
1132         /* Finally, set the new power state. */
1133         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1134
1135         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1136
1137         return 0;
1138 }
1139
1140 static void tg3_link_report(struct tg3 *tp)
1141 {
1142         if (!netif_carrier_ok(tp->dev)) {
1143                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1144         } else {
1145                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1146                        tp->dev->name,
1147                        (tp->link_config.active_speed == SPEED_1000 ?
1148                         1000 :
1149                         (tp->link_config.active_speed == SPEED_100 ?
1150                          100 : 10)),
1151                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1152                         "full" : "half"));
1153
1154                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1155                        "%s for RX.\n",
1156                        tp->dev->name,
1157                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1158                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1159         }
1160 }
1161
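/* Resolve TX/RX pause from the local and link-partner advertisements
 * (standard 802.3 symmetric/asymmetric pause resolution) when pause
 * autonegotiation is enabled, then reprogram MAC_RX_MODE / MAC_TX_MODE only
 * if the result changed.
 */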
1162 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1163 {
1164         u32 new_tg3_flags = 0;
1165         u32 old_rx_mode = tp->rx_mode;
1166         u32 old_tx_mode = tp->tx_mode;
1167
1168         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1169                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1170                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1171                                 if (remote_adv & LPA_PAUSE_CAP)
1172                                         new_tg3_flags |=
1173                                                 (TG3_FLAG_RX_PAUSE |
1174                                                 TG3_FLAG_TX_PAUSE);
1175                                 else if (remote_adv & LPA_PAUSE_ASYM)
1176                                         new_tg3_flags |=
1177                                                 (TG3_FLAG_RX_PAUSE);
1178                         } else {
1179                                 if (remote_adv & LPA_PAUSE_CAP)
1180                                         new_tg3_flags |=
1181                                                 (TG3_FLAG_RX_PAUSE |
1182                                                 TG3_FLAG_TX_PAUSE);
1183                         }
1184                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1185                         if ((remote_adv & LPA_PAUSE_CAP) &&
1186                         (remote_adv & LPA_PAUSE_ASYM))
1187                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1188                 }
1189
1190                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1191                 tp->tg3_flags |= new_tg3_flags;
1192         } else {
1193                 new_tg3_flags = tp->tg3_flags;
1194         }
1195
1196         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1197                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1198         else
1199                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1200
1201         if (old_rx_mode != tp->rx_mode) {
1202                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1203         }
1204         
1205         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1206                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1207         else
1208                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1209
1210         if (old_tx_mode != tp->tx_mode) {
1211                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1212         }
1213 }
1214
1215 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1216 {
1217         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1218         case MII_TG3_AUX_STAT_10HALF:
1219                 *speed = SPEED_10;
1220                 *duplex = DUPLEX_HALF;
1221                 break;
1222
1223         case MII_TG3_AUX_STAT_10FULL:
1224                 *speed = SPEED_10;
1225                 *duplex = DUPLEX_FULL;
1226                 break;
1227
1228         case MII_TG3_AUX_STAT_100HALF:
1229                 *speed = SPEED_100;
1230                 *duplex = DUPLEX_HALF;
1231                 break;
1232
1233         case MII_TG3_AUX_STAT_100FULL:
1234                 *speed = SPEED_100;
1235                 *duplex = DUPLEX_FULL;
1236                 break;
1237
1238         case MII_TG3_AUX_STAT_1000HALF:
1239                 *speed = SPEED_1000;
1240                 *duplex = DUPLEX_HALF;
1241                 break;
1242
1243         case MII_TG3_AUX_STAT_1000FULL:
1244                 *speed = SPEED_1000;
1245                 *duplex = DUPLEX_FULL;
1246                 break;
1247
1248         default:
1249                 *speed = SPEED_INVALID;
1250                 *duplex = DUPLEX_INVALID;
1251                 break;
1252         };
1253 }
1254
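/* Program the copper PHY advertisement registers from link_config (a reduced
 * set when entering low-power mode), or force a specific speed/duplex, then
 * restart autonegotiation unless a fixed mode was requested.
 */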
1255 static void tg3_phy_copper_begin(struct tg3 *tp)
1256 {
1257         u32 new_adv;
1258         int i;
1259
1260         if (tp->link_config.phy_is_low_power) {
1261                 /* Entering low power mode.  Disable gigabit and
1262                  * 100baseT advertisements.
1263                  */
1264                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1265
1266                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1267                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1268                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1269                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1270
1271                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1272         } else if (tp->link_config.speed == SPEED_INVALID) {
1273                 tp->link_config.advertising =
1274                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1275                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1276                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1277                          ADVERTISED_Autoneg | ADVERTISED_MII);
1278
1279                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1280                         tp->link_config.advertising &=
1281                                 ~(ADVERTISED_1000baseT_Half |
1282                                   ADVERTISED_1000baseT_Full);
1283
1284                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1285                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1286                         new_adv |= ADVERTISE_10HALF;
1287                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1288                         new_adv |= ADVERTISE_10FULL;
1289                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1290                         new_adv |= ADVERTISE_100HALF;
1291                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1292                         new_adv |= ADVERTISE_100FULL;
1293                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1294
1295                 if (tp->link_config.advertising &
1296                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1297                         new_adv = 0;
1298                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1299                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1300                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1301                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1302                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1303                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1304                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1305                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1306                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1307                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1308                 } else {
1309                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1310                 }
1311         } else {
1312                 /* Asking for a specific link mode. */
1313                 if (tp->link_config.speed == SPEED_1000) {
1314                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1315                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1316
1317                         if (tp->link_config.duplex == DUPLEX_FULL)
1318                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1319                         else
1320                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1321                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1322                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1323                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1324                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1325                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1326                 } else {
1327                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1328
1329                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1330                         if (tp->link_config.speed == SPEED_100) {
1331                                 if (tp->link_config.duplex == DUPLEX_FULL)
1332                                         new_adv |= ADVERTISE_100FULL;
1333                                 else
1334                                         new_adv |= ADVERTISE_100HALF;
1335                         } else {
1336                                 if (tp->link_config.duplex == DUPLEX_FULL)
1337                                         new_adv |= ADVERTISE_10FULL;
1338                                 else
1339                                         new_adv |= ADVERTISE_10HALF;
1340                         }
1341                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1342                 }
1343         }
1344
1345         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1346             tp->link_config.speed != SPEED_INVALID) {
1347                 u32 bmcr, orig_bmcr;
1348
1349                 tp->link_config.active_speed = tp->link_config.speed;
1350                 tp->link_config.active_duplex = tp->link_config.duplex;
1351
1352                 bmcr = 0;
1353                 switch (tp->link_config.speed) {
1354                 default:
1355                 case SPEED_10:
1356                         break;
1357
1358                 case SPEED_100:
1359                         bmcr |= BMCR_SPEED100;
1360                         break;
1361
1362                 case SPEED_1000:
1363                         bmcr |= TG3_BMCR_SPEED1000;
1364                         break;
1365                 };
1366
1367                 if (tp->link_config.duplex == DUPLEX_FULL)
1368                         bmcr |= BMCR_FULLDPLX;
1369
1370                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1371                     (bmcr != orig_bmcr)) {
1372                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1373                         for (i = 0; i < 1500; i++) {
1374                                 u32 tmp;
1375
1376                                 udelay(10);
1377                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1378                                     tg3_readphy(tp, MII_BMSR, &tmp))
1379                                         continue;
1380                                 if (!(tmp & BMSR_LSTATUS)) {
1381                                         udelay(40);
1382                                         break;
1383                                 }
1384                         }
1385                         tg3_writephy(tp, MII_BMCR, bmcr);
1386                         udelay(40);
1387                 }
1388         } else {
1389                 tg3_writephy(tp, MII_BMCR,
1390                              BMCR_ANENABLE | BMCR_ANRESTART);
1391         }
1392 }
1393
1394 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1395 {
1396         int err;
1397
1398         /* Turn off tap power management. */
1399         /* Set Extended packet length bit */
1400         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1401
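             /* Each pair of writes below selects a DSP register via
              * MII_TG3_DSP_ADDRESS and then stores its value through
              * MII_TG3_DSP_RW_PORT.
              */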
1402         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1403         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1404
1405         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1406         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1407
1408         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1409         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1410
1411         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1412         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1413
1414         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1415         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1416
1417         udelay(40);
1418
1419         return err;
1420 }
1421
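     /* Return 1 only if the PHY is advertising every 10/100 mode and,
      * unless the device is 10/100-only, both 1000 modes as well.
      */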
1422 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1423 {
1424         u32 adv_reg, all_mask;
1425
1426         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1427                 return 0;
1428
1429         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1430                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1431         if ((adv_reg & all_mask) != all_mask)
1432                 return 0;
1433         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1434                 u32 tg3_ctrl;
1435
1436                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1437                         return 0;
1438
1439                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1440                             MII_TG3_CTRL_ADV_1000_FULL);
1441                 if ((tg3_ctrl & all_mask) != all_mask)
1442                         return 0;
1443         }
1444         return 1;
1445 }
1446
1447 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1448 {
1449         int current_link_up;
1450         u32 bmsr, dummy;
1451         u16 current_speed;
1452         u8 current_duplex;
1453         int i, err;
1454
1455         tw32(MAC_EVENT, 0);
1456
1457         tw32_f(MAC_STATUS,
1458              (MAC_STATUS_SYNC_CHANGED |
1459               MAC_STATUS_CFG_CHANGED |
1460               MAC_STATUS_MI_COMPLETION |
1461               MAC_STATUS_LNKSTATE_CHANGED));
1462         udelay(40);
1463
1464         tp->mi_mode = MAC_MI_MODE_BASE;
1465         tw32_f(MAC_MI_MODE, tp->mi_mode);
1466         udelay(80);
1467
1468         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1469
1470         /* Some third-party PHYs need to be reset on link going
1471          * down.
1472          */
1473         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1474              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1475              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1476             netif_carrier_ok(tp->dev)) {
1477                 tg3_readphy(tp, MII_BMSR, &bmsr);
1478                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1479                     !(bmsr & BMSR_LSTATUS))
1480                         force_reset = 1;
1481         }
1482         if (force_reset)
1483                 tg3_phy_reset(tp);
1484
1485         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1486                 tg3_readphy(tp, MII_BMSR, &bmsr);
1487                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1488                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1489                         bmsr = 0;
1490
1491                 if (!(bmsr & BMSR_LSTATUS)) {
1492                         err = tg3_init_5401phy_dsp(tp);
1493                         if (err)
1494                                 return err;
1495
1496                         tg3_readphy(tp, MII_BMSR, &bmsr);
1497                         for (i = 0; i < 1000; i++) {
1498                                 udelay(10);
1499                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1500                                     (bmsr & BMSR_LSTATUS)) {
1501                                         udelay(40);
1502                                         break;
1503                                 }
1504                         }
1505
1506                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1507                             !(bmsr & BMSR_LSTATUS) &&
1508                             tp->link_config.active_speed == SPEED_1000) {
1509                                 err = tg3_phy_reset(tp);
1510                                 if (!err)
1511                                         err = tg3_init_5401phy_dsp(tp);
1512                                 if (err)
1513                                         return err;
1514                         }
1515                 }
1516         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1517                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1518                 /* 5701 {A0,B0} CRC bug workaround */
1519                 tg3_writephy(tp, 0x15, 0x0a75);
1520                 tg3_writephy(tp, 0x1c, 0x8c68);
1521                 tg3_writephy(tp, 0x1c, 0x8d68);
1522                 tg3_writephy(tp, 0x1c, 0x8c68);
1523         }
1524
1525         /* Clear pending interrupts... */
1526         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1527         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1528
1529         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1530                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1531         else
1532                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1533
1534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1536                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1537                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1538                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1539                 else
1540                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1541         }
1542
1543         current_link_up = 0;
1544         current_speed = SPEED_INVALID;
1545         current_duplex = DUPLEX_INVALID;
1546
1547         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1548                 u32 val;
1549
1550                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1551                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1552                 if (!(val & (1 << 10))) {
1553                         val |= (1 << 10);
1554                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1555                         goto relink;
1556                 }
1557         }
1558
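             /* Link status in BMSR is latched; read it twice per pass so
              * the second read reflects the current state, and poll
              * briefly for the link to come up.
              */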
1559         bmsr = 0;
1560         for (i = 0; i < 100; i++) {
1561                 tg3_readphy(tp, MII_BMSR, &bmsr);
1562                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1563                     (bmsr & BMSR_LSTATUS))
1564                         break;
1565                 udelay(40);
1566         }
1567
1568         if (bmsr & BMSR_LSTATUS) {
1569                 u32 aux_stat, bmcr;
1570
1571                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1572                 for (i = 0; i < 2000; i++) {
1573                         udelay(10);
1574                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1575                             aux_stat)
1576                                 break;
1577                 }
1578
1579                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1580                                              &current_speed,
1581                                              &current_duplex);
1582
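                     /* BMCR may read back as 0 or 0x7fff while the PHY is
                      * still settling; retry until we see a sane value.
                      */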
1583                 bmcr = 0;
1584                 for (i = 0; i < 200; i++) {
1585                         tg3_readphy(tp, MII_BMCR, &bmcr);
1586                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1587                                 continue;
1588                         if (bmcr && bmcr != 0x7fff)
1589                                 break;
1590                         udelay(10);
1591                 }
1592
1593                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1594                         if (bmcr & BMCR_ANENABLE) {
1595                                 current_link_up = 1;
1596
1597                                 /* Force autoneg restart if we are exiting
1598                                  * low power mode.
1599                                  */
1600                                 if (!tg3_copper_is_advertising_all(tp))
1601                                         current_link_up = 0;
1602                         } else {
1603                                 current_link_up = 0;
1604                         }
1605                 } else {
1606                         if (!(bmcr & BMCR_ANENABLE) &&
1607                             tp->link_config.speed == current_speed &&
1608                             tp->link_config.duplex == current_duplex) {
1609                                 current_link_up = 1;
1610                         } else {
1611                                 current_link_up = 0;
1612                         }
1613                 }
1614
1615                 tp->link_config.active_speed = current_speed;
1616                 tp->link_config.active_duplex = current_duplex;
1617         }
1618
1619         if (current_link_up == 1 &&
1620             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1621             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1622                 u32 local_adv, remote_adv;
1623
1624                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1625                         local_adv = 0;
1626                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1627
1628                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1629                         remote_adv = 0;
1630
1631                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1632
1633                 /* If we are not advertising full pause capability,
1634                  * something is wrong.  Bring the link down and reconfigure.
1635                  */
1636                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1637                         current_link_up = 0;
1638                 } else {
1639                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1640                 }
1641         }
1642 relink:
1643         if (current_link_up == 0) {
1644                 u32 tmp;
1645
1646                 tg3_phy_copper_begin(tp);
1647
1648                 tg3_readphy(tp, MII_BMSR, &tmp);
1649                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1650                     (tmp & BMSR_LSTATUS))
1651                         current_link_up = 1;
1652         }
1653
1654         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1655         if (current_link_up == 1) {
1656                 if (tp->link_config.active_speed == SPEED_100 ||
1657                     tp->link_config.active_speed == SPEED_10)
1658                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1659                 else
1660                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1661         } else
1662                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1663
1664         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1665         if (tp->link_config.active_duplex == DUPLEX_HALF)
1666                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1667
1668         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1670                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1671                     (current_link_up == 1 &&
1672                      tp->link_config.active_speed == SPEED_10))
1673                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1674         } else {
1675                 if (current_link_up == 1)
1676                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1677         }
1678
1679         /* ??? Without this setting Netgear GA302T PHY does not
1680          * ??? send/receive packets...
1681          */
1682         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1683             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1684                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1685                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1686                 udelay(80);
1687         }
1688
1689         tw32_f(MAC_MODE, tp->mac_mode);
1690         udelay(40);
1691
1692         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1693                 /* Polled via timer. */
1694                 tw32_f(MAC_EVENT, 0);
1695         } else {
1696                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1697         }
1698         udelay(40);
1699
1700         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1701             current_link_up == 1 &&
1702             tp->link_config.active_speed == SPEED_1000 &&
1703             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1704              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1705                 udelay(120);
1706                 tw32_f(MAC_STATUS,
1707                      (MAC_STATUS_SYNC_CHANGED |
1708                       MAC_STATUS_CFG_CHANGED));
1709                 udelay(40);
1710                 tg3_write_mem(tp,
1711                               NIC_SRAM_FIRMWARE_MBOX,
1712                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1713         }
1714
1715         if (current_link_up != netif_carrier_ok(tp->dev)) {
1716                 if (current_link_up)
1717                         netif_carrier_on(tp->dev);
1718                 else
1719                         netif_carrier_off(tp->dev);
1720                 tg3_link_report(tp);
1721         }
1722
1723         return 0;
1724 }
1725
1726 struct tg3_fiber_aneginfo {
1727         int state;
1728 #define ANEG_STATE_UNKNOWN              0
1729 #define ANEG_STATE_AN_ENABLE            1
1730 #define ANEG_STATE_RESTART_INIT         2
1731 #define ANEG_STATE_RESTART              3
1732 #define ANEG_STATE_DISABLE_LINK_OK      4
1733 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1734 #define ANEG_STATE_ABILITY_DETECT       6
1735 #define ANEG_STATE_ACK_DETECT_INIT      7
1736 #define ANEG_STATE_ACK_DETECT           8
1737 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1738 #define ANEG_STATE_COMPLETE_ACK         10
1739 #define ANEG_STATE_IDLE_DETECT_INIT     11
1740 #define ANEG_STATE_IDLE_DETECT          12
1741 #define ANEG_STATE_LINK_OK              13
1742 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1743 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1744
1745         u32 flags;
1746 #define MR_AN_ENABLE            0x00000001
1747 #define MR_RESTART_AN           0x00000002
1748 #define MR_AN_COMPLETE          0x00000004
1749 #define MR_PAGE_RX              0x00000008
1750 #define MR_NP_LOADED            0x00000010
1751 #define MR_TOGGLE_TX            0x00000020
1752 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1753 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1754 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1755 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1756 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1757 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1758 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1759 #define MR_TOGGLE_RX            0x00002000
1760 #define MR_NP_RX                0x00004000
1761
1762 #define MR_LINK_OK              0x80000000
1763
1764         unsigned long link_time, cur_time;
1765
1766         u32 ability_match_cfg;
1767         int ability_match_count;
1768
1769         char ability_match, idle_match, ack_match;
1770
1771         u32 txconfig, rxconfig;
1772 #define ANEG_CFG_NP             0x00000080
1773 #define ANEG_CFG_ACK            0x00000040
1774 #define ANEG_CFG_RF2            0x00000020
1775 #define ANEG_CFG_RF1            0x00000010
1776 #define ANEG_CFG_PS2            0x00000001
1777 #define ANEG_CFG_PS1            0x00008000
1778 #define ANEG_CFG_HD             0x00004000
1779 #define ANEG_CFG_FD             0x00002000
1780 #define ANEG_CFG_INVAL          0x00001f06
1781
1782 };
1783 #define ANEG_OK         0
1784 #define ANEG_DONE       1
1785 #define ANEG_TIMER_ENAB 2
1786 #define ANEG_FAILED     -1
1787
1788 #define ANEG_STATE_SETTLE_TIME  10000
1789
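     /* Software state machine driving fiber auto-negotiation; it is
      * stepped by the caller and follows a 1000BASE-X (clause 37) style
      * arbitration using the config words latched by the MAC.
      */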
1790 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1791                                    struct tg3_fiber_aneginfo *ap)
1792 {
1793         unsigned long delta;
1794         u32 rx_cfg_reg;
1795         int ret;
1796
1797         if (ap->state == ANEG_STATE_UNKNOWN) {
1798                 ap->rxconfig = 0;
1799                 ap->link_time = 0;
1800                 ap->cur_time = 0;
1801                 ap->ability_match_cfg = 0;
1802                 ap->ability_match_count = 0;
1803                 ap->ability_match = 0;
1804                 ap->idle_match = 0;
1805                 ap->ack_match = 0;
1806         }
1807         ap->cur_time++;
1808
1809         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1810                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1811
1812                 if (rx_cfg_reg != ap->ability_match_cfg) {
1813                         ap->ability_match_cfg = rx_cfg_reg;
1814                         ap->ability_match = 0;
1815                         ap->ability_match_count = 0;
1816                 } else {
1817                         if (++ap->ability_match_count > 1) {
1818                                 ap->ability_match = 1;
1819                                 ap->ability_match_cfg = rx_cfg_reg;
1820                         }
1821                 }
1822                 if (rx_cfg_reg & ANEG_CFG_ACK)
1823                         ap->ack_match = 1;
1824                 else
1825                         ap->ack_match = 0;
1826
1827                 ap->idle_match = 0;
1828         } else {
1829                 ap->idle_match = 1;
1830                 ap->ability_match_cfg = 0;
1831                 ap->ability_match_count = 0;
1832                 ap->ability_match = 0;
1833                 ap->ack_match = 0;
1834
1835                 rx_cfg_reg = 0;
1836         }
1837
1838         ap->rxconfig = rx_cfg_reg;
1839         ret = ANEG_OK;
1840
1841         switch(ap->state) {
1842         case ANEG_STATE_UNKNOWN:
1843                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1844                         ap->state = ANEG_STATE_AN_ENABLE;
1845
1846                 /* fallthru */
1847         case ANEG_STATE_AN_ENABLE:
1848                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1849                 if (ap->flags & MR_AN_ENABLE) {
1850                         ap->link_time = 0;
1851                         ap->cur_time = 0;
1852                         ap->ability_match_cfg = 0;
1853                         ap->ability_match_count = 0;
1854                         ap->ability_match = 0;
1855                         ap->idle_match = 0;
1856                         ap->ack_match = 0;
1857
1858                         ap->state = ANEG_STATE_RESTART_INIT;
1859                 } else {
1860                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1861                 }
1862                 break;
1863
1864         case ANEG_STATE_RESTART_INIT:
1865                 ap->link_time = ap->cur_time;
1866                 ap->flags &= ~(MR_NP_LOADED);
1867                 ap->txconfig = 0;
1868                 tw32(MAC_TX_AUTO_NEG, 0);
1869                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1870                 tw32_f(MAC_MODE, tp->mac_mode);
1871                 udelay(40);
1872
1873                 ret = ANEG_TIMER_ENAB;
1874                 ap->state = ANEG_STATE_RESTART;
1875
1876                 /* fallthru */
1877         case ANEG_STATE_RESTART:
1878                 delta = ap->cur_time - ap->link_time;
1879                 if (delta > ANEG_STATE_SETTLE_TIME) {
1880                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1881                 } else {
1882                         ret = ANEG_TIMER_ENAB;
1883                 }
1884                 break;
1885
1886         case ANEG_STATE_DISABLE_LINK_OK:
1887                 ret = ANEG_DONE;
1888                 break;
1889
1890         case ANEG_STATE_ABILITY_DETECT_INIT:
1891                 ap->flags &= ~(MR_TOGGLE_TX);
1892                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1893                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1894                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1895                 tw32_f(MAC_MODE, tp->mac_mode);
1896                 udelay(40);
1897
1898                 ap->state = ANEG_STATE_ABILITY_DETECT;
1899                 break;
1900
1901         case ANEG_STATE_ABILITY_DETECT:
1902                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1903                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1904                 }
1905                 break;
1906
1907         case ANEG_STATE_ACK_DETECT_INIT:
1908                 ap->txconfig |= ANEG_CFG_ACK;
1909                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1910                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1911                 tw32_f(MAC_MODE, tp->mac_mode);
1912                 udelay(40);
1913
1914                 ap->state = ANEG_STATE_ACK_DETECT;
1915
1916                 /* fallthru */
1917         case ANEG_STATE_ACK_DETECT:
1918                 if (ap->ack_match != 0) {
1919                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1920                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1921                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1922                         } else {
1923                                 ap->state = ANEG_STATE_AN_ENABLE;
1924                         }
1925                 } else if (ap->ability_match != 0 &&
1926                            ap->rxconfig == 0) {
1927                         ap->state = ANEG_STATE_AN_ENABLE;
1928                 }
1929                 break;
1930
1931         case ANEG_STATE_COMPLETE_ACK_INIT:
1932                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1933                         ret = ANEG_FAILED;
1934                         break;
1935                 }
1936                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1937                                MR_LP_ADV_HALF_DUPLEX |
1938                                MR_LP_ADV_SYM_PAUSE |
1939                                MR_LP_ADV_ASYM_PAUSE |
1940                                MR_LP_ADV_REMOTE_FAULT1 |
1941                                MR_LP_ADV_REMOTE_FAULT2 |
1942                                MR_LP_ADV_NEXT_PAGE |
1943                                MR_TOGGLE_RX |
1944                                MR_NP_RX);
1945                 if (ap->rxconfig & ANEG_CFG_FD)
1946                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1947                 if (ap->rxconfig & ANEG_CFG_HD)
1948                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1949                 if (ap->rxconfig & ANEG_CFG_PS1)
1950                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1951                 if (ap->rxconfig & ANEG_CFG_PS2)
1952                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1953                 if (ap->rxconfig & ANEG_CFG_RF1)
1954                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1955                 if (ap->rxconfig & ANEG_CFG_RF2)
1956                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1957                 if (ap->rxconfig & ANEG_CFG_NP)
1958                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1959
1960                 ap->link_time = ap->cur_time;
1961
1962                 ap->flags ^= (MR_TOGGLE_TX);
1963                 if (ap->rxconfig & 0x0008)
1964                         ap->flags |= MR_TOGGLE_RX;
1965                 if (ap->rxconfig & ANEG_CFG_NP)
1966                         ap->flags |= MR_NP_RX;
1967                 ap->flags |= MR_PAGE_RX;
1968
1969                 ap->state = ANEG_STATE_COMPLETE_ACK;
1970                 ret = ANEG_TIMER_ENAB;
1971                 break;
1972
1973         case ANEG_STATE_COMPLETE_ACK:
1974                 if (ap->ability_match != 0 &&
1975                     ap->rxconfig == 0) {
1976                         ap->state = ANEG_STATE_AN_ENABLE;
1977                         break;
1978                 }
1979                 delta = ap->cur_time - ap->link_time;
1980                 if (delta > ANEG_STATE_SETTLE_TIME) {
1981                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1982                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1983                         } else {
1984                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1985                                     !(ap->flags & MR_NP_RX)) {
1986                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1987                                 } else {
1988                                         ret = ANEG_FAILED;
1989                                 }
1990                         }
1991                 }
1992                 break;
1993
1994         case ANEG_STATE_IDLE_DETECT_INIT:
1995                 ap->link_time = ap->cur_time;
1996                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1997                 tw32_f(MAC_MODE, tp->mac_mode);
1998                 udelay(40);
1999
2000                 ap->state = ANEG_STATE_IDLE_DETECT;
2001                 ret = ANEG_TIMER_ENAB;
2002                 break;
2003
2004         case ANEG_STATE_IDLE_DETECT:
2005                 if (ap->ability_match != 0 &&
2006                     ap->rxconfig == 0) {
2007                         ap->state = ANEG_STATE_AN_ENABLE;
2008                         break;
2009                 }
2010                 delta = ap->cur_time - ap->link_time;
2011                 if (delta > ANEG_STATE_SETTLE_TIME) {
2012                         /* XXX another gem from the Broadcom driver :( */
2013                         ap->state = ANEG_STATE_LINK_OK;
2014                 }
2015                 break;
2016
2017         case ANEG_STATE_LINK_OK:
2018                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2019                 ret = ANEG_DONE;
2020                 break;
2021
2022         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2023                 /* ??? unimplemented */
2024                 break;
2025
2026         case ANEG_STATE_NEXT_PAGE_WAIT:
2027                 /* ??? unimplemented */
2028                 break;
2029
2030         default:
2031                 ret = ANEG_FAILED;
2032                 break;
2033         }
2034
2035         return ret;
2036 }
2037
2038 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2039 {
2040         int res = 0;
2041         struct tg3_fiber_aneginfo aninfo;
2042         int status = ANEG_FAILED;
2043         unsigned int tick;
2044         u32 tmp;
2045
2046         tw32_f(MAC_TX_AUTO_NEG, 0);
2047
2048         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2049         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2050         udelay(40);
2051
2052         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2053         udelay(40);
2054
2055         memset(&aninfo, 0, sizeof(aninfo));
2056         aninfo.flags |= MR_AN_ENABLE;
2057         aninfo.state = ANEG_STATE_UNKNOWN;
2058         aninfo.cur_time = 0;
2059         tick = 0;
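             /* Step the state machine roughly once per microsecond,
              * allowing about 195 ms to reach a terminal state.
              */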
2060         while (++tick < 195000) {
2061                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2062                 if (status == ANEG_DONE || status == ANEG_FAILED)
2063                         break;
2064
2065                 udelay(1);
2066         }
2067
2068         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2069         tw32_f(MAC_MODE, tp->mac_mode);
2070         udelay(40);
2071
2072         *flags = aninfo.flags;
2073
2074         if (status == ANEG_DONE &&
2075             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2076                              MR_LP_ADV_FULL_DUPLEX)))
2077                 res = 1;
2078
2079         return res;
2080 }
2081
2082 static void tg3_init_bcm8002(struct tg3 *tp)
2083 {
2084         u32 mac_status = tr32(MAC_STATUS);
2085         int i;
2086
2087         /* Reset when initializing for the first time or we have a link. */
2088         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2089             !(mac_status & MAC_STATUS_PCS_SYNCED))
2090                 return;
2091
2092         /* Set PLL lock range. */
2093         tg3_writephy(tp, 0x16, 0x8007);
2094
2095         /* SW reset */
2096         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2097
2098         /* Wait for reset to complete. */
2099         /* XXX schedule_timeout() ... */
2100         for (i = 0; i < 500; i++)
2101                 udelay(10);
2102
2103         /* Config mode; select PMA/Ch 1 regs. */
2104         tg3_writephy(tp, 0x10, 0x8411);
2105
2106         /* Enable auto-lock and comdet, select txclk for tx. */
2107         tg3_writephy(tp, 0x11, 0x0a10);
2108
2109         tg3_writephy(tp, 0x18, 0x00a0);
2110         tg3_writephy(tp, 0x16, 0x41ff);
2111
2112         /* Assert and deassert POR. */
2113         tg3_writephy(tp, 0x13, 0x0400);
2114         udelay(40);
2115         tg3_writephy(tp, 0x13, 0x0000);
2116
2117         tg3_writephy(tp, 0x11, 0x0a50);
2118         udelay(40);
2119         tg3_writephy(tp, 0x11, 0x0a10);
2120
2121         /* Wait for signal to stabilize */
2122         /* XXX schedule_timeout() ... */
2123         for (i = 0; i < 15000; i++)
2124                 udelay(10);
2125
2126         /* Deselect the channel register so we can read the PHYID
2127          * later.
2128          */
2129         tg3_writephy(tp, 0x10, 0x8011);
2130 }
2131
2132 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2133 {
2134         u32 sg_dig_ctrl, sg_dig_status;
2135         u32 serdes_cfg, expected_sg_dig_ctrl;
2136         int workaround, port_a;
2137         int current_link_up;
2138
2139         serdes_cfg = 0;
2140         expected_sg_dig_ctrl = 0;
2141         workaround = 0;
2142         port_a = 1;
2143         current_link_up = 0;
2144
2145         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2146             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2147                 workaround = 1;
2148                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2149                         port_a = 0;
2150
2151                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2152                 /* preserve bits 20-23 for voltage regulator */
2153                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2154         }
2155
2156         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2157
2158         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2159                 if (sg_dig_ctrl & (1 << 31)) {
2160                         if (workaround) {
2161                                 u32 val = serdes_cfg;
2162
2163                                 if (port_a)
2164                                         val |= 0xc010000;
2165                                 else
2166                                         val |= 0x4010000;
2167                                 tw32_f(MAC_SERDES_CFG, val);
2168                         }
2169                         tw32_f(SG_DIG_CTRL, 0x01388400);
2170                 }
2171                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2172                         tg3_setup_flow_control(tp, 0, 0);
2173                         current_link_up = 1;
2174                 }
2175                 goto out;
2176         }
2177
2178         /* Want auto-negotiation.  */
2179         expected_sg_dig_ctrl = 0x81388400;
2180
2181         /* Pause capability */
2182         expected_sg_dig_ctrl |= (1 << 11);
2183
2184                 /* Asymmetric pause */
2185         expected_sg_dig_ctrl |= (1 << 12);
2186
2187         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2188                 if (workaround)
2189                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2190                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2191                 udelay(5);
2192                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2193
2194                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2195         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2196                                  MAC_STATUS_SIGNAL_DET)) {
2197                 int i;
2198
2199                 /* Give time to negotiate (~200ms) */
2200                 for (i = 0; i < 40000; i++) {
2201                         sg_dig_status = tr32(SG_DIG_STATUS);
2202                         if (sg_dig_status & (0x3))
2203                                 break;
2204                         udelay(5);
2205                 }
2206                 mac_status = tr32(MAC_STATUS);
2207
2208                 if ((sg_dig_status & (1 << 1)) &&
2209                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2210                         u32 local_adv, remote_adv;
2211
2212                         local_adv = ADVERTISE_PAUSE_CAP;
2213                         remote_adv = 0;
2214                         if (sg_dig_status & (1 << 19))
2215                                 remote_adv |= LPA_PAUSE_CAP;
2216                         if (sg_dig_status & (1 << 20))
2217                                 remote_adv |= LPA_PAUSE_ASYM;
2218
2219                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2220                         current_link_up = 1;
2221                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2222                 } else if (!(sg_dig_status & (1 << 1))) {
2223                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2224                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2225                         else {
2226                                 if (workaround) {
2227                                         u32 val = serdes_cfg;
2228
2229                                         if (port_a)
2230                                                 val |= 0xc010000;
2231                                         else
2232                                                 val |= 0x4010000;
2233
2234                                         tw32_f(MAC_SERDES_CFG, val);
2235                                 }
2236
2237                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2238                                 udelay(40);
2239
2240                                 /* Link parallel detection: link is up only
2241                                  * if we have PCS_SYNC and are not receiving
2242                                  * config code words. */
2243                                 mac_status = tr32(MAC_STATUS);
2244                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2245                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2246                                         tg3_setup_flow_control(tp, 0, 0);
2247                                         current_link_up = 1;
2248                                 }
2249                         }
2250                 }
2251         }
2252
2253 out:
2254         return current_link_up;
2255 }
2256
2257 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2258 {
2259         int current_link_up = 0;
2260
2261         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2262                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2263                 goto out;
2264         }
2265
2266         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2267                 u32 flags;
2268                 int i;
2269
2270                 if (fiber_autoneg(tp, &flags)) {
2271                         u32 local_adv, remote_adv;
2272
2273                         local_adv = ADVERTISE_PAUSE_CAP;
2274                         remote_adv = 0;
2275                         if (flags & MR_LP_ADV_SYM_PAUSE)
2276                                 remote_adv |= LPA_PAUSE_CAP;
2277                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2278                                 remote_adv |= LPA_PAUSE_ASYM;
2279
2280                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2281
2282                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2283                         current_link_up = 1;
2284                 }
2285                 for (i = 0; i < 30; i++) {
2286                         udelay(20);
2287                         tw32_f(MAC_STATUS,
2288                                (MAC_STATUS_SYNC_CHANGED |
2289                                 MAC_STATUS_CFG_CHANGED));
2290                         udelay(40);
2291                         if ((tr32(MAC_STATUS) &
2292                              (MAC_STATUS_SYNC_CHANGED |
2293                               MAC_STATUS_CFG_CHANGED)) == 0)
2294                                 break;
2295                 }
2296
2297                 mac_status = tr32(MAC_STATUS);
2298                 if (current_link_up == 0 &&
2299                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2300                     !(mac_status & MAC_STATUS_RCVD_CFG))
2301                         current_link_up = 1;
2302         } else {
2303                 /* Forcing 1000FD link up. */
2304                 current_link_up = 1;
2305                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2306
2307                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2308                 udelay(40);
2309         }
2310
2311 out:
2312         return current_link_up;
2313 }
2314
2315 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2316 {
2317         u32 orig_pause_cfg;
2318         u16 orig_active_speed;
2319         u8 orig_active_duplex;
2320         u32 mac_status;
2321         int current_link_up;
2322         int i;
2323
2324         orig_pause_cfg =
2325                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2326                                   TG3_FLAG_TX_PAUSE));
2327         orig_active_speed = tp->link_config.active_speed;
2328         orig_active_duplex = tp->link_config.active_duplex;
2329
2330         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2331             netif_carrier_ok(tp->dev) &&
2332             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2333                 mac_status = tr32(MAC_STATUS);
2334                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2335                                MAC_STATUS_SIGNAL_DET |
2336                                MAC_STATUS_CFG_CHANGED |
2337                                MAC_STATUS_RCVD_CFG);
2338                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2339                                    MAC_STATUS_SIGNAL_DET)) {
2340                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2341                                             MAC_STATUS_CFG_CHANGED));
2342                         return 0;
2343                 }
2344         }
2345
2346         tw32_f(MAC_TX_AUTO_NEG, 0);
2347
2348         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2349         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2350         tw32_f(MAC_MODE, tp->mac_mode);
2351         udelay(40);
2352
2353         if (tp->phy_id == PHY_ID_BCM8002)
2354                 tg3_init_bcm8002(tp);
2355
2356         /* Enable link change event even when serdes polling.  */
2357         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2358         udelay(40);
2359
2360         current_link_up = 0;
2361         mac_status = tr32(MAC_STATUS);
2362
2363         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2364                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2365         else
2366                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2367
2368         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2369         tw32_f(MAC_MODE, tp->mac_mode);
2370         udelay(40);
2371
2372         tp->hw_status->status =
2373                 (SD_STATUS_UPDATED |
2374                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2375
2376         for (i = 0; i < 100; i++) {
2377                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2378                                     MAC_STATUS_CFG_CHANGED));
2379                 udelay(5);
2380                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2381                                          MAC_STATUS_CFG_CHANGED)) == 0)
2382                         break;
2383         }
2384
2385         mac_status = tr32(MAC_STATUS);
2386         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2387                 current_link_up = 0;
2388                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2389                         tw32_f(MAC_MODE, (tp->mac_mode |
2390                                           MAC_MODE_SEND_CONFIGS));
2391                         udelay(1);
2392                         tw32_f(MAC_MODE, tp->mac_mode);
2393                 }
2394         }
2395
2396         if (current_link_up == 1) {
2397                 tp->link_config.active_speed = SPEED_1000;
2398                 tp->link_config.active_duplex = DUPLEX_FULL;
2399                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2400                                     LED_CTRL_LNKLED_OVERRIDE |
2401                                     LED_CTRL_1000MBPS_ON));
2402         } else {
2403                 tp->link_config.active_speed = SPEED_INVALID;
2404                 tp->link_config.active_duplex = DUPLEX_INVALID;
2405                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2406                                     LED_CTRL_LNKLED_OVERRIDE |
2407                                     LED_CTRL_TRAFFIC_OVERRIDE));
2408         }
2409
2410         if (current_link_up != netif_carrier_ok(tp->dev)) {
2411                 if (current_link_up)
2412                         netif_carrier_on(tp->dev);
2413                 else
2414                         netif_carrier_off(tp->dev);
2415                 tg3_link_report(tp);
2416         } else {
2417                 u32 now_pause_cfg =
2418                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2419                                          TG3_FLAG_TX_PAUSE);
2420                 if (orig_pause_cfg != now_pause_cfg ||
2421                     orig_active_speed != tp->link_config.active_speed ||
2422                     orig_active_duplex != tp->link_config.active_duplex)
2423                         tg3_link_report(tp);
2424         }
2425
2426         return 0;
2427 }
2428
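     /* Configure the link for either the SerDes/fiber or the copper PHY,
      * then adjust MAC transmit timings and statistics coalescing to
      * match the resulting link state.
      */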
2429 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2430 {
2431         int err;
2432
2433         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2434                 err = tg3_setup_fiber_phy(tp, force_reset);
2435         } else {
2436                 err = tg3_setup_copper_phy(tp, force_reset);
2437         }
2438
2439         if (tp->link_config.active_speed == SPEED_1000 &&
2440             tp->link_config.active_duplex == DUPLEX_HALF)
2441                 tw32(MAC_TX_LENGTHS,
2442                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2443                       (6 << TX_LENGTHS_IPG_SHIFT) |
2444                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2445         else
2446                 tw32(MAC_TX_LENGTHS,
2447                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2448                       (6 << TX_LENGTHS_IPG_SHIFT) |
2449                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2450
2451         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2452             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2453                 if (netif_carrier_ok(tp->dev)) {
2454                         tw32(HOSTCC_STAT_COAL_TICKS,
2455                              DEFAULT_STAT_COAL_TICKS);
2456                 } else {
2457                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2458                 }
2459         }
2460
2461         return err;
2462 }
2463
2464 /* Tigon3 never reports partial packet sends.  So we do not
2465  * need special logic to handle SKBs that have not had all
2466  * of their frags sent yet, like SunGEM does.
2467  */
2468 static void tg3_tx(struct tg3 *tp)
2469 {
2470         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2471         u32 sw_idx = tp->tx_cons;
2472
2473         while (sw_idx != hw_idx) {
2474                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2475                 struct sk_buff *skb = ri->skb;
2476                 int i;
2477
2478                 if (unlikely(skb == NULL))
2479                         BUG();
2480
2481                 pci_unmap_single(tp->pdev,
2482                                  pci_unmap_addr(ri, mapping),
2483                                  skb_headlen(skb),
2484                                  PCI_DMA_TODEVICE);
2485
2486                 ri->skb = NULL;
2487
2488                 sw_idx = NEXT_TX(sw_idx);
2489
2490                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2491                         if (unlikely(sw_idx == hw_idx))
2492                                 BUG();
2493
2494                         ri = &tp->tx_buffers[sw_idx];
2495                         if (unlikely(ri->skb != NULL))
2496                                 BUG();
2497
2498                         pci_unmap_page(tp->pdev,
2499                                        pci_unmap_addr(ri, mapping),
2500                                        skb_shinfo(skb)->frags[i].size,
2501                                        PCI_DMA_TODEVICE);
2502
2503                         sw_idx = NEXT_TX(sw_idx);
2504                 }
2505
2506                 dev_kfree_skb_irq(skb);
2507         }
2508
2509         tp->tx_cons = sw_idx;
2510
2511         if (netif_queue_stopped(tp->dev) &&
2512             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2513                 netif_wake_queue(tp->dev);
2514 }
2515
2516 /* Returns size of skb allocated or < 0 on error.
2517  *
2518  * We only need to fill in the address because the other members
2519  * of the RX descriptor are invariant, see tg3_init_rings.
2520  *
2521  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2522  * posting buffers we only dirty the first cache line of the RX
2523  * descriptor (containing the address).  Whereas for the RX status
2524  * buffers the cpu only reads the last cacheline of the RX descriptor
2525  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2526  */
2527 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2528                             int src_idx, u32 dest_idx_unmasked)
2529 {
2530         struct tg3_rx_buffer_desc *desc;
2531         struct ring_info *map, *src_map;
2532         struct sk_buff *skb;
2533         dma_addr_t mapping;
2534         int skb_size, dest_idx;
2535
2536         src_map = NULL;
2537         switch (opaque_key) {
2538         case RXD_OPAQUE_RING_STD:
2539                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2540                 desc = &tp->rx_std[dest_idx];
2541                 map = &tp->rx_std_buffers[dest_idx];
2542                 if (src_idx >= 0)
2543                         src_map = &tp->rx_std_buffers[src_idx];
2544                 skb_size = RX_PKT_BUF_SZ;
2545                 break;
2546
2547         case RXD_OPAQUE_RING_JUMBO:
2548                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2549                 desc = &tp->rx_jumbo[dest_idx];
2550                 map = &tp->rx_jumbo_buffers[dest_idx];
2551                 if (src_idx >= 0)
2552                         src_map = &tp->rx_jumbo_buffers[src_idx];
2553                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2554                 break;
2555
2556         default:
2557                 return -EINVAL;
2558         }
2559
2560         /* Do not overwrite any of the map or rp information
2561          * until we are sure we can commit to a new buffer.
2562          *
2563          * Callers depend upon this behavior and assume that
2564          * we leave everything unchanged if we fail.
2565          */
2566         skb = dev_alloc_skb(skb_size);
2567         if (skb == NULL)
2568                 return -ENOMEM;
2569
2570         skb->dev = tp->dev;
2571         skb_reserve(skb, tp->rx_offset);
2572
2573         mapping = pci_map_single(tp->pdev, skb->data,
2574                                  skb_size - tp->rx_offset,
2575                                  PCI_DMA_FROMDEVICE);
2576
2577         map->skb = skb;
2578         pci_unmap_addr_set(map, mapping, mapping);
2579
2580         if (src_map != NULL)
2581                 src_map->skb = NULL;
2582
2583         desc->addr_hi = ((u64)mapping >> 32);
2584         desc->addr_lo = ((u64)mapping & 0xffffffff);
2585
2586         return skb_size;
2587 }
2588
2589 /* We only need to move over in the address because the other
2590  * members of the RX descriptor are invariant.  See notes above
2591  * tg3_alloc_rx_skb for full details.
2592  */
2593 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2594                            int src_idx, u32 dest_idx_unmasked)
2595 {
2596         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2597         struct ring_info *src_map, *dest_map;
2598         int dest_idx;
2599
2600         switch (opaque_key) {
2601         case RXD_OPAQUE_RING_STD:
2602                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2603                 dest_desc = &tp->rx_std[dest_idx];
2604                 dest_map = &tp->rx_std_buffers[dest_idx];
2605                 src_desc = &tp->rx_std[src_idx];
2606                 src_map = &tp->rx_std_buffers[src_idx];
2607                 break;
2608
2609         case RXD_OPAQUE_RING_JUMBO:
2610                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2611                 dest_desc = &tp->rx_jumbo[dest_idx];
2612                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2613                 src_desc = &tp->rx_jumbo[src_idx];
2614                 src_map = &tp->rx_jumbo_buffers[src_idx];
2615                 break;
2616
2617         default:
2618                 return;
2619         }
2620
2621         dest_map->skb = src_map->skb;
2622         pci_unmap_addr_set(dest_map, mapping,
2623                            pci_unmap_addr(src_map, mapping));
2624         dest_desc->addr_hi = src_desc->addr_hi;
2625         dest_desc->addr_lo = src_desc->addr_lo;
2626
2627         src_map->skb = NULL;
2628 }
2629
2630 #if TG3_VLAN_TAG_USED
2631 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2632 {
2633         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2634 }
2635 #endif
2636
2637 /* The RX ring scheme is composed of multiple rings which post fresh
2638  * buffers to the chip, and one special ring the chip uses to report
2639  * status back to the host.
2640  *
2641  * The special ring reports the status of received packets to the
2642  * host.  The chip does not write into the original descriptor the
2643  * RX buffer was obtained from.  The chip simply takes the original
2644  * descriptor as provided by the host, updates the status and length
2645  * field, then writes this into the next status ring entry.
2646  *
2647  * Each ring the host uses to post buffers to the chip is described
2648  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
2649  * it is first placed into the on-chip ram.  When the packet's length
2650  * is known, it walks down the TG3_BDINFO entries to select the ring.
2651  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2652  * which is within the range of the new packet's length is chosen.
2653  *
2654  * The "separate ring for rx status" scheme may sound queer, but it makes
2655  * sense from a cache coherency perspective.  If only the host writes
2656  * to the buffer post rings, and only the chip writes to the rx status
2657  * rings, then cache lines never move beyond shared-modified state.
2658  * If both the host and chip were to write into the same ring, cache line
2659  * eviction could occur since both entities want it in an exclusive state.
2660  */
2661 static int tg3_rx(struct tg3 *tp, int budget)
2662 {
2663         u32 work_mask;
2664         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2665         u16 hw_idx, sw_idx;
2666         int received;
2667
2668         hw_idx = tp->hw_status->idx[0].rx_producer;
2669         /*
2670          * We need to order the read of hw_idx and the read of
2671          * the opaque cookie.
2672          */
2673         rmb();
2674         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2675         work_mask = 0;
2676         received = 0;
2677         while (sw_idx != hw_idx && budget > 0) {
2678                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2679                 unsigned int len;
2680                 struct sk_buff *skb;
2681                 dma_addr_t dma_addr;
2682                 u32 opaque_key, desc_idx, *post_ptr;
2683
2684                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2685                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2686                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2687                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2688                                                   mapping);
2689                         skb = tp->rx_std_buffers[desc_idx].skb;
2690                         post_ptr = &tp->rx_std_ptr;
2691                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2692                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2693                                                   mapping);
2694                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2695                         post_ptr = &tp->rx_jumbo_ptr;
2696                 }
2697                 else {
2698                         goto next_pkt_nopost;
2699                 }
2700
2701                 work_mask |= opaque_key;
2702
2703                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2704                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2705                 drop_it:
2706                         tg3_recycle_rx(tp, opaque_key,
2707                                        desc_idx, *post_ptr);
2708                 drop_it_no_recycle:
2709                         /* Other statistics kept track of by card. */
2710                         tp->net_stats.rx_dropped++;
2711                         goto next_pkt;
2712                 }
2713
2714                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2715
2716                 if (len > RX_COPY_THRESHOLD &&
2717                     tp->rx_offset == 2) {
2718                         /* rx_offset != 2 iff this is a 5701 card
2719                          * running in PCI-X mode
2720                          * [see tg3_get_invariants()] */
2721                         int skb_size;
2722
2723                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2724                                                     desc_idx, *post_ptr);
2725                         if (skb_size < 0)
2726                                 goto drop_it;
2727
2728                         pci_unmap_single(tp->pdev, dma_addr,
2729                                          skb_size - tp->rx_offset,
2730                                          PCI_DMA_FROMDEVICE);
2731
2732                         skb_put(skb, len);
2733                 } else {
2734                         struct sk_buff *copy_skb;
2735
2736                         tg3_recycle_rx(tp, opaque_key,
2737                                        desc_idx, *post_ptr);
2738
2739                         copy_skb = dev_alloc_skb(len + 2);
2740                         if (copy_skb == NULL)
2741                                 goto drop_it_no_recycle;
2742
2743                         copy_skb->dev = tp->dev;
2744                         skb_reserve(copy_skb, 2);
2745                         skb_put(copy_skb, len);
2746                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2747                         memcpy(copy_skb->data, skb->data, len);
2748                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2749
2750                         /* We'll reuse the original ring buffer. */
2751                         skb = copy_skb;
2752                 }
2753
2754                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2755                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2756                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2757                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2758                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2759                 else
2760                         skb->ip_summed = CHECKSUM_NONE;
2761
2762                 skb->protocol = eth_type_trans(skb, tp->dev);
2763 #if TG3_VLAN_TAG_USED
2764                 if (tp->vlgrp != NULL &&
2765                     desc->type_flags & RXD_FLAG_VLAN) {
2766                         tg3_vlan_rx(tp, skb,
2767                                     desc->err_vlan & RXD_VLAN_MASK);
2768                 } else
2769 #endif
2770                         netif_receive_skb(skb);
2771
2772                 tp->dev->last_rx = jiffies;
2773                 received++;
2774                 budget--;
2775
2776 next_pkt:
2777                 (*post_ptr)++;
2778 next_pkt_nopost:
2779                 rx_rcb_ptr++;
2780                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2781         }
2782
2783         /* ACK the status ring. */
2784         tp->rx_rcb_ptr = rx_rcb_ptr;
2785         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2786                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2787
2788         /* Refill RX ring(s). */
2789         if (work_mask & RXD_OPAQUE_RING_STD) {
2790                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2791                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2792                              sw_idx);
2793         }
2794         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2795                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2796                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2797                              sw_idx);
2798         }
2799         mmiowb();
2800
2801         return received;
2802 }
2803
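/* NAPI poll callback.  Handles link-change events reported in the status
 * block, reaps completed TX descriptors under tx_lock, and processes
 * received packets via tg3_rx() within the quota/budget given by the
 * stack.  When all pending work is done, it removes the device from the
 * poll list and re-enables chip interrupts via tg3_restart_ints().
 */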
2804 static int tg3_poll(struct net_device *netdev, int *budget)
2805 {
2806         struct tg3 *tp = netdev_priv(netdev);
2807         struct tg3_hw_status *sblk = tp->hw_status;
2808         unsigned long flags;
2809         int done;
2810
2811         spin_lock_irqsave(&tp->lock, flags);
2812
2813         /* handle link change and other phy events */
2814         if (!(tp->tg3_flags &
2815               (TG3_FLAG_USE_LINKCHG_REG |
2816                TG3_FLAG_POLL_SERDES))) {
2817                 if (sblk->status & SD_STATUS_LINK_CHG) {
2818                         sblk->status = SD_STATUS_UPDATED |
2819                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2820                         tg3_setup_phy(tp, 0);
2821                 }
2822         }
2823
2824         /* run TX completion thread */
2825         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2826                 spin_lock(&tp->tx_lock);
2827                 tg3_tx(tp);
2828                 spin_unlock(&tp->tx_lock);
2829         }
2830
2831         spin_unlock_irqrestore(&tp->lock, flags);
2832
2833         /* run RX thread, within the bounds set by NAPI.
2834          * All RX "locking" is done by ensuring outside
2835          * code synchronizes with dev->poll()
2836          */
2837         done = 1;
2838         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2839                 int orig_budget = *budget;
2840                 int work_done;
2841
2842                 if (orig_budget > netdev->quota)
2843                         orig_budget = netdev->quota;
2844
2845                 work_done = tg3_rx(tp, orig_budget);
2846
2847                 *budget -= work_done;
2848                 netdev->quota -= work_done;
2849
2850                 if (work_done >= orig_budget)
2851                         done = 0;
2852         }
2853
2854         /* if no more work, tell net stack and NIC we're done */
2855         if (done) {
2856                 spin_lock_irqsave(&tp->lock, flags);
2857                 __netif_rx_complete(netdev);
2858                 tg3_restart_ints(tp);
2859                 spin_unlock_irqrestore(&tp->lock, flags);
2860         }
2861
2862         return (done ? 0 : 1);
2863 }
2864
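/* Quick check of the status block: returns nonzero if there is a pending
 * link-change event, TX completion, or newly received packet.  Used by the
 * interrupt handler to decide whether scheduling the NAPI poll is worthwhile.
 */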
2865 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2866 {
2867         struct tg3_hw_status *sblk = tp->hw_status;
2868         unsigned int work_exists = 0;
2869
2870         /* check for phy events */
2871         if (!(tp->tg3_flags &
2872               (TG3_FLAG_USE_LINKCHG_REG |
2873                TG3_FLAG_POLL_SERDES))) {
2874                 if (sblk->status & SD_STATUS_LINK_CHG)
2875                         work_exists = 1;
2876         }
2877         /* check for RX/TX work to do */
2878         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2879             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2880                 work_exists = 1;
2881
2882         return work_exists;
2883 }
2884
2885 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2886 {
2887         struct net_device *dev = dev_id;
2888         struct tg3 *tp = netdev_priv(dev);
2889         struct tg3_hw_status *sblk = tp->hw_status;
2890         unsigned long flags;
2891         unsigned int handled = 1;
2892
2893         spin_lock_irqsave(&tp->lock, flags);
2894
2895         if (sblk->status & SD_STATUS_UPDATED) {
2896                 /*
2897                  * writing any value to intr-mbox-0 clears PCI INTA# and
2898                  * chip-internal interrupt pending events.
2899                  * writing non-zero to intr-mbox-0 additionally tells the
2900                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2901                  * event coalescing.
2902                  */
2903                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2904                              0x00000001);
2905                 /*
2906                  * Flush PCI write.  This also guarantees that our
2907                  * status block has been flushed to host memory.
2908                  */
2909                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2910                 sblk->status &= ~SD_STATUS_UPDATED;
2911
2912                 if (likely(tg3_has_work(dev, tp)))
2913                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2914                 else {
2915                         /* no work, shared interrupt perhaps?  re-enable
2916                          * interrupts, and flush that PCI write
2917                          */
2918                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2919                                 0x00000000);
2920                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2921                 }
2922         } else {        /* shared interrupt */
2923                 handled = 0;
2924         }
2925
2926         spin_unlock_irqrestore(&tp->lock, flags);
2927
2928         return IRQ_RETVAL(handled);
2929 }
2930
2931 static int tg3_init_hw(struct tg3 *);
2932 static int tg3_halt(struct tg3 *);
2933
2934 #ifdef CONFIG_NET_POLL_CONTROLLER
2935 static void tg3_poll_controller(struct net_device *dev)
2936 {
2937         tg3_interrupt(dev->irq, dev, NULL);
2938 }
2939 #endif
2940
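/* Deferred reset handler, run from the shared workqueue (scheduled by
 * tg3_tx_timeout()).  Stops the interface, halts and re-initializes the
 * hardware under tp->lock/tx_lock, restarts the interface, and optionally
 * re-arms the driver timer.
 */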
2941 static void tg3_reset_task(void *_data)
2942 {
2943         struct tg3 *tp = _data;
2944         unsigned int restart_timer;
2945
2946         tg3_netif_stop(tp);
2947
2948         spin_lock_irq(&tp->lock);
2949         spin_lock(&tp->tx_lock);
2950
2951         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2952         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2953
2954         tg3_halt(tp);
2955         tg3_init_hw(tp);
2956
2957         tg3_netif_start(tp);
2958
2959         spin_unlock(&tp->tx_lock);
2960         spin_unlock_irq(&tp->lock);
2961
2962         if (restart_timer)
2963                 mod_timer(&tp->timer, jiffies + 1);
2964 }
2965
2966 static void tg3_tx_timeout(struct net_device *dev)
2967 {
2968         struct tg3 *tp = netdev_priv(dev);
2969
2970         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2971                dev->name);
2972
2973         schedule_work(&tp->reset_task);
2974 }
2975
2976 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2977
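/* Workaround for the 4GB-boundary DMA hardware bug: linearize the skb into
 * a fresh copy, map and describe it with a single TX descriptor, then unmap
 * and clean up the software ring entries that had been set up for the
 * original buffers.
 */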
2978 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2979                                        u32 guilty_entry, int guilty_len,
2980                                        u32 last_plus_one, u32 *start, u32 mss)
2981 {
2982         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2983         dma_addr_t new_addr;
2984         u32 entry = *start;
2985         int i;
2986
2987         if (!new_skb) {
2988                 dev_kfree_skb(skb);
2989                 return -1;
2990         }
2991
2992         /* New SKB is guaranteed to be linear. */
2993         entry = *start;
2994         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2995                                   PCI_DMA_TODEVICE);
2996         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2997                     (skb->ip_summed == CHECKSUM_HW) ?
2998                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2999         *start = NEXT_TX(entry);
3000
3001         /* Now clean up the sw ring entries. */
3002         i = 0;
3003         while (entry != last_plus_one) {
3004                 int len;
3005
3006                 if (i == 0)
3007                         len = skb_headlen(skb);
3008                 else
3009                         len = skb_shinfo(skb)->frags[i-1].size;
3010                 pci_unmap_single(tp->pdev,
3011                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3012                                  len, PCI_DMA_TODEVICE);
3013                 if (i == 0) {
3014                         tp->tx_buffers[entry].skb = new_skb;
3015                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3016                 } else {
3017                         tp->tx_buffers[entry].skb = NULL;
3018                 }
3019                 entry = NEXT_TX(entry);
3020         }
3021
3022         dev_kfree_skb(skb);
3023
3024         return 0;
3025 }
3026
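/* Fill in one hardware TX descriptor: split the DMA address into high/low
 * words, pack the length and flags, and encode the VLAN tag together with
 * the TSO MSS.  The low bit of mss_and_is_end marks the final descriptor
 * of a frame (TXD_FLAG_END).
 */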
3027 static void tg3_set_txd(struct tg3 *tp, int entry,
3028                         dma_addr_t mapping, int len, u32 flags,
3029                         u32 mss_and_is_end)
3030 {
3031         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3032         int is_end = (mss_and_is_end & 0x1);
3033         u32 mss = (mss_and_is_end >> 1);
3034         u32 vlan_tag = 0;
3035
3036         if (is_end)
3037                 flags |= TXD_FLAG_END;
3038         if (flags & TXD_FLAG_VLAN) {
3039                 vlan_tag = flags >> 16;
3040                 flags &= 0xffff;
3041         }
3042         vlan_tag |= (mss << TXD_MSS_SHIFT);
3043
3044         txd->addr_hi = ((u64) mapping >> 32);
3045         txd->addr_lo = ((u64) mapping & 0xffffffff);
3046         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3047         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3048 }
3049
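/* Returns nonzero when the low 32 bits of a DMA mapping would wrap while
 * covering len bytes (plus a small guard), i.e. the buffer straddles a 4GB
 * boundary.  Such mappings trigger the hardware bug handled by
 * tigon3_4gb_hwbug_workaround().
 */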
3050 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3051 {
3052         u32 base = (u32) mapping & 0xffffffff;
3053
3054         return ((base > 0xffffdcc0) &&
3055                 (base + len + 8 < base));
3056 }
3057
3058 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3059 {
3060         struct tg3 *tp = netdev_priv(dev);
3061         dma_addr_t mapping;
3062         unsigned int i;
3063         u32 len, entry, base_flags, mss;
3064         int would_hit_hwbug;
3065         unsigned long flags;
3066
3067         len = skb_headlen(skb);
3068
3069         /* No BH disabling for tx_lock here.  We are running in BH disabled
3070          * context and TX reclaim runs via tp->poll inside of a software
3071          * interrupt.  Rejoice!
3072          *
3073          * Actually, things are not so simple.  If we are to take a hw
3074          * IRQ here, we can deadlock, consider:
3075          *
3076          *       CPU1           CPU2
3077          *   tg3_start_xmit
3078          *   take tp->tx_lock
3079          *                      tg3_timer
3080          *                      take tp->lock
3081          *   tg3_interrupt
3082          *   spin on tp->lock
3083          *                      spin on tp->tx_lock
3084          *
3085          * So we really do need to disable interrupts when taking
3086          * tx_lock here.
3087          */
3088         local_irq_save(flags);
3089         if (!spin_trylock(&tp->tx_lock)) {
3090                 local_irq_restore(flags);
3091                 return NETDEV_TX_LOCKED;
3092         }
3093
3094         /* This is a hard error, log it. */
3095         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3096                 netif_stop_queue(dev);
3097                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3098                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3099                        dev->name);
3100                 return NETDEV_TX_BUSY;
3101         }
3102
3103         entry = tp->tx_prod;
3104         base_flags = 0;
3105         if (skb->ip_summed == CHECKSUM_HW)
3106                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3107 #if TG3_TSO_SUPPORT != 0
3108         mss = 0;
3109         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3110             (mss = skb_shinfo(skb)->tso_size) != 0) {
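                /* TSO: clear the IP checksum and set tot_len to the
                 * per-segment size.  Chips with TG3_FLG2_HW_TSO compute the
                 * TCP checksum themselves; otherwise seed the pseudo-header
                 * checksum for the firmware TSO engine.  Header-length hints
                 * are packed into mss or base_flags below.
                 */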
3111                 int tcp_opt_len, ip_tcp_len;
3112
3113                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3114                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3115
3116                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3117                                TXD_FLAG_CPU_POST_DMA);
3118
3119                 skb->nh.iph->check = 0;
3120                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3121                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3122                         skb->h.th->check = 0;
3123                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3124                 }
3125                 else {
3126                         skb->h.th->check =
3127                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3128                                                    skb->nh.iph->daddr,
3129                                                    0, IPPROTO_TCP, 0);
3130                 }
3131
3132                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3133                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3134                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3135                                 int tsflags;
3136
3137                                 tsflags = ((skb->nh.iph->ihl - 5) +
3138                                            (tcp_opt_len >> 2));
3139                                 mss |= (tsflags << 11);
3140                         }
3141                 } else {
3142                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3143                                 int tsflags;
3144
3145                                 tsflags = ((skb->nh.iph->ihl - 5) +
3146                                            (tcp_opt_len >> 2));
3147                                 base_flags |= tsflags << 12;
3148                         }
3149                 }
3150         }
3151 #else
3152         mss = 0;
3153 #endif
3154 #if TG3_VLAN_TAG_USED
3155         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3156                 base_flags |= (TXD_FLAG_VLAN |
3157                                (vlan_tx_tag_get(skb) << 16));
3158 #endif
3159
3160         /* Queue skb data, a.k.a. the main skb fragment. */
3161         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3162
3163         tp->tx_buffers[entry].skb = skb;
3164         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3165
3166         would_hit_hwbug = 0;
3167
3168         if (tg3_4g_overflow_test(mapping, len))
3169                 would_hit_hwbug = entry + 1;
3170
3171         tg3_set_txd(tp, entry, mapping, len, base_flags,
3172                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3173
3174         entry = NEXT_TX(entry);
3175
3176         /* Now loop through additional data fragments, and queue them. */
3177         if (skb_shinfo(skb)->nr_frags > 0) {
3178                 unsigned int i, last;
3179
3180                 last = skb_shinfo(skb)->nr_frags - 1;
3181                 for (i = 0; i <= last; i++) {
3182                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3183
3184                         len = frag->size;
3185                         mapping = pci_map_page(tp->pdev,
3186                                                frag->page,
3187                                                frag->page_offset,
3188                                                len, PCI_DMA_TODEVICE);
3189
3190                         tp->tx_buffers[entry].skb = NULL;
3191                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3192
3193                         if (tg3_4g_overflow_test(mapping, len)) {
3194                                 /* Only one should match. */
3195                                 if (would_hit_hwbug)
3196                                         BUG();
3197                                 would_hit_hwbug = entry + 1;
3198                         }
3199
3200                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3201                                 tg3_set_txd(tp, entry, mapping, len,
3202                                             base_flags, (i == last)|(mss << 1));
3203                         else
3204                                 tg3_set_txd(tp, entry, mapping, len,
3205                                             base_flags, (i == last));
3206
3207                         entry = NEXT_TX(entry);
3208                 }
3209         }
3210
3211         if (would_hit_hwbug) {
3212                 u32 last_plus_one = entry;
3213                 u32 start;
3214                 unsigned int len = 0;
3215
3216                 would_hit_hwbug -= 1;
3217                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3218                 entry &= (TG3_TX_RING_SIZE - 1);
3219                 start = entry;
3220                 i = 0;
3221                 while (entry != last_plus_one) {
3222                         if (i == 0)
3223                                 len = skb_headlen(skb);
3224                         else
3225                                 len = skb_shinfo(skb)->frags[i-1].size;
3226
3227                         if (entry == would_hit_hwbug)
3228                                 break;
3229
3230                         i++;
3231                         entry = NEXT_TX(entry);
3232
3233                 }
3234
3235                 /* If the workaround fails due to memory/mapping
3236                  * failure, silently drop this packet.
3237                  */
3238                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3239                                                 entry, len,
3240                                                 last_plus_one,
3241                                                 &start, mss))
3242                         goto out_unlock;
3243
3244                 entry = start;
3245         }
3246
3247         /* Packets are ready, update Tx producer idx local and on card. */
3248         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3249
3250         tp->tx_prod = entry;
3251         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3252                 netif_stop_queue(dev);
3253
3254 out_unlock:
3255         mmiowb();
3256         spin_unlock_irqrestore(&tp->tx_lock, flags);
3257
3258         dev->trans_start = jiffies;
3259
3260         return NETDEV_TX_OK;
3261 }
3262
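/* MTU handling: tg3_set_mtu() records the new MTU and sets or clears the
 * jumbo-frame flag; tg3_change_mtu() additionally halts and re-initializes
 * the chip when the interface is running so the new size takes effect.
 */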
3263 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3264                                int new_mtu)
3265 {
3266         dev->mtu = new_mtu;
3267
3268         if (new_mtu > ETH_DATA_LEN)
3269                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3270         else
3271                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3272 }
3273
3274 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3275 {
3276         struct tg3 *tp = netdev_priv(dev);
3277
3278         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3279                 return -EINVAL;
3280
3281         if (!netif_running(dev)) {
3282                 /* We'll just catch it later when the
3283                  * device is brought up.
3284                  */
3285                 tg3_set_mtu(dev, tp, new_mtu);
3286                 return 0;
3287         }
3288
3289         tg3_netif_stop(tp);
3290         spin_lock_irq(&tp->lock);
3291         spin_lock(&tp->tx_lock);
3292
3293         tg3_halt(tp);
3294
3295         tg3_set_mtu(dev, tp, new_mtu);
3296
3297         tg3_init_hw(tp);
3298
3299         tg3_netif_start(tp);
3300
3301         spin_unlock(&tp->tx_lock);
3302         spin_unlock_irq(&tp->lock);
3303
3304         return 0;
3305 }
3306
3307 /* Free up pending packets in all rx/tx rings.
3308  *
3309  * The chip has been shut down and the driver detached from
3310  * the networking stack, so no interrupts or new tx packets will
3311  * end up in the driver.  tp->{tx,}lock is not held and we are not
3312  * in an interrupt context and thus may sleep.
3313  */
3314 static void tg3_free_rings(struct tg3 *tp)
3315 {
3316         struct ring_info *rxp;
3317         int i;
3318
3319         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3320                 rxp = &tp->rx_std_buffers[i];
3321
3322                 if (rxp->skb == NULL)
3323                         continue;
3324                 pci_unmap_single(tp->pdev,
3325                                  pci_unmap_addr(rxp, mapping),
3326                                  RX_PKT_BUF_SZ - tp->rx_offset,
3327                                  PCI_DMA_FROMDEVICE);
3328                 dev_kfree_skb_any(rxp->skb);
3329                 rxp->skb = NULL;
3330         }
3331
3332         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3333                 rxp = &tp->rx_jumbo_buffers[i];
3334
3335                 if (rxp->skb == NULL)
3336                         continue;
3337                 pci_unmap_single(tp->pdev,
3338                                  pci_unmap_addr(rxp, mapping),
3339                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3340                                  PCI_DMA_FROMDEVICE);
3341                 dev_kfree_skb_any(rxp->skb);
3342                 rxp->skb = NULL;
3343         }
3344
3345         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3346                 struct tx_ring_info *txp;
3347                 struct sk_buff *skb;
3348                 int j;
3349
3350                 txp = &tp->tx_buffers[i];
3351                 skb = txp->skb;
3352
3353                 if (skb == NULL) {
3354                         i++;
3355                         continue;
3356                 }
3357
3358                 pci_unmap_single(tp->pdev,
3359                                  pci_unmap_addr(txp, mapping),
3360                                  skb_headlen(skb),
3361                                  PCI_DMA_TODEVICE);
3362                 txp->skb = NULL;
3363
3364                 i++;
3365
3366                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3367                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3368                         pci_unmap_page(tp->pdev,
3369                                        pci_unmap_addr(txp, mapping),
3370                                        skb_shinfo(skb)->frags[j].size,
3371                                        PCI_DMA_TODEVICE);
3372                         i++;
3373                 }
3374
3375                 dev_kfree_skb_any(skb);
3376         }
3377 }
3378
3379 /* Initialize tx/rx rings for packet processing.
3380  *
3381  * The chip has been shut down and the driver detached from
3382  * the networking stack, so no interrupts or new tx packets will
3383  * end up in the driver.  tp->{tx,}lock are held and thus
3384  * we may not sleep.
3385  */
3386 static void tg3_init_rings(struct tg3 *tp)
3387 {
3388         u32 i;
3389
3390         /* Free up all the SKBs. */
3391         tg3_free_rings(tp);
3392
3393         /* Zero out all descriptors. */
3394         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3395         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3396         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3397         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3398
3399         /* Initialize invariants of the rings; we only set this
3400          * stuff once.  This works because the card does not
3401          * write into the rx buffer posting rings.
3402          */
3403         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3404                 struct tg3_rx_buffer_desc *rxd;
3405
3406                 rxd = &tp->rx_std[i];
3407                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3408                         << RXD_LEN_SHIFT;
3409                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3410                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3411                                (i << RXD_OPAQUE_INDEX_SHIFT));
3412         }
3413
3414         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3415                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3416                         struct tg3_rx_buffer_desc *rxd;
3417
3418                         rxd = &tp->rx_jumbo[i];
3419                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3420                                 << RXD_LEN_SHIFT;
3421                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3422                                 RXD_FLAG_JUMBO;
3423                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3424                                (i << RXD_OPAQUE_INDEX_SHIFT));
3425                 }
3426         }
3427
3428         /* Now allocate fresh SKBs for each rx ring. */
3429         for (i = 0; i < tp->rx_pending; i++) {
3430                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3431                                      -1, i) < 0)
3432                         break;
3433         }
3434
3435         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3436                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3437                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3438                                              -1, i) < 0)
3439                                 break;
3440                 }
3441         }
3442 }
3443
3444 /*
3445  * Must not be invoked with interrupt sources disabled and
3446  * the hardware shut down.
3447  */
3448 static void tg3_free_consistent(struct tg3 *tp)
3449 {
3450         if (tp->rx_std_buffers) {
3451                 kfree(tp->rx_std_buffers);
3452                 tp->rx_std_buffers = NULL;
3453         }
3454         if (tp->rx_std) {
3455                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3456                                     tp->rx_std, tp->rx_std_mapping);
3457                 tp->rx_std = NULL;
3458         }
3459         if (tp->rx_jumbo) {
3460                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3461                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3462                 tp->rx_jumbo = NULL;
3463         }
3464         if (tp->rx_rcb) {
3465                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3466                                     tp->rx_rcb, tp->rx_rcb_mapping);
3467                 tp->rx_rcb = NULL;
3468         }
3469         if (tp->tx_ring) {
3470                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3471                         tp->tx_ring, tp->tx_desc_mapping);
3472                 tp->tx_ring = NULL;
3473         }
3474         if (tp->hw_status) {
3475                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3476                                     tp->hw_status, tp->status_mapping);
3477                 tp->hw_status = NULL;
3478         }
3479         if (tp->hw_stats) {
3480                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3481                                     tp->hw_stats, tp->stats_mapping);
3482                 tp->hw_stats = NULL;
3483         }
3484 }
3485
3486 /*
3487  * Must not be invoked with interrupt sources disabled and
3488  * the hardware shut down.  Can sleep.
3489  */
3490 static int tg3_alloc_consistent(struct tg3 *tp)
3491 {
3492         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3493                                       (TG3_RX_RING_SIZE +
3494                                        TG3_RX_JUMBO_RING_SIZE)) +
3495                                      (sizeof(struct tx_ring_info) *
3496                                       TG3_TX_RING_SIZE),
3497                                      GFP_KERNEL);
3498         if (!tp->rx_std_buffers)
3499                 return -ENOMEM;
3500
3501         memset(tp->rx_std_buffers, 0,
3502                (sizeof(struct ring_info) *
3503                 (TG3_RX_RING_SIZE +
3504                  TG3_RX_JUMBO_RING_SIZE)) +
3505                (sizeof(struct tx_ring_info) *
3506                 TG3_TX_RING_SIZE));
3507
3508         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3509         tp->tx_buffers = (struct tx_ring_info *)
3510                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3511
3512         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3513                                           &tp->rx_std_mapping);
3514         if (!tp->rx_std)
3515                 goto err_out;
3516
3517         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3518                                             &tp->rx_jumbo_mapping);
3519
3520         if (!tp->rx_jumbo)
3521                 goto err_out;
3522
3523         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3524                                           &tp->rx_rcb_mapping);
3525         if (!tp->rx_rcb)
3526                 goto err_out;
3527
3528         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3529                                            &tp->tx_desc_mapping);
3530         if (!tp->tx_ring)
3531                 goto err_out;
3532
3533         tp->hw_status = pci_alloc_consistent(tp->pdev,
3534                                              TG3_HW_STATUS_SIZE,
3535                                              &tp->status_mapping);
3536         if (!tp->hw_status)
3537                 goto err_out;
3538
3539         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3540                                             sizeof(struct tg3_hw_stats),
3541                                             &tp->stats_mapping);
3542         if (!tp->hw_stats)
3543                 goto err_out;
3544
3545         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3546         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3547
3548         return 0;
3549
3550 err_out:
3551         tg3_free_consistent(tp);
3552         return -ENOMEM;
3553 }
3554
3555 #define MAX_WAIT_CNT 1000
3556
3557 /* To stop a block, clear the enable bit and poll till it
3558  * clears.  tp->lock is held.
3559  */
3560 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3561 {
3562         unsigned int i;
3563         u32 val;
3564
3565         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3566             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3567                 switch (ofs) {
3568                 case RCVLSC_MODE:
3569                 case DMAC_MODE:
3570                 case MBFREE_MODE:
3571                 case BUFMGR_MODE:
3572                 case MEMARB_MODE:
3573                         /* We can't enable/disable these bits of the
3574                          * 5705/5750, just say success.
3575                          */
3576                         return 0;
3577
3578                 default:
3579                         break;
3580                 };
3581         }
3582
3583         val = tr32(ofs);
3584         val &= ~enable_bit;
3585         tw32_f(ofs, val);
3586
3587         for (i = 0; i < MAX_WAIT_CNT; i++) {
3588                 udelay(100);
3589                 val = tr32(ofs);
3590                 if ((val & enable_bit) == 0)
3591                         break;
3592         }
3593
3594         if (i == MAX_WAIT_CNT) {
3595                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3596                        "ofs=%lx enable_bit=%x\n",
3597                        ofs, enable_bit);
3598                 return -ENODEV;
3599         }
3600
3601         return 0;
3602 }
3603
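/* Quiesce the chip without resetting it: disable interrupts and the RX MAC,
 * stop the receive and transmit engine blocks in order, wait for the TX MAC
 * to drain, stop the host coalescing/DMA/buffer-manager blocks, and clear
 * the status block and statistics area.
 */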
3604 /* tp->lock is held. */
3605 static int tg3_abort_hw(struct tg3 *tp)
3606 {
3607         int i, err;
3608
3609         tg3_disable_ints(tp);
3610
3611         tp->rx_mode &= ~RX_MODE_ENABLE;
3612         tw32_f(MAC_RX_MODE, tp->rx_mode);
3613         udelay(10);
3614
3615         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3616         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3617         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3618         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3619         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3620         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3621
3622         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3623         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3624         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3625         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3626         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3627         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3628         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3629         if (err)
3630                 goto out;
3631
3632         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3633         tw32_f(MAC_MODE, tp->mac_mode);
3634         udelay(40);
3635
3636         tp->tx_mode &= ~TX_MODE_ENABLE;
3637         tw32_f(MAC_TX_MODE, tp->tx_mode);
3638
3639         for (i = 0; i < MAX_WAIT_CNT; i++) {
3640                 udelay(100);
3641                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3642                         break;
3643         }
3644         if (i >= MAX_WAIT_CNT) {
3645                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3646                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3647                        tp->dev->name, tr32(MAC_TX_MODE));
3648                 return -ENODEV;
3649         }
3650
3651         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3652         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3653         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3654
3655         tw32(FTQ_RESET, 0xffffffff);
3656         tw32(FTQ_RESET, 0x00000000);
3657
3658         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3659         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3660         if (err)
3661                 goto out;
3662
3663         if (tp->hw_status)
3664                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3665         if (tp->hw_stats)
3666                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3667
3668 out:
3669         return err;
3670 }
3671
3672 /* tp->lock is held. */
3673 static int tg3_nvram_lock(struct tg3 *tp)
3674 {
3675         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3676                 int i;
3677
3678                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3679                 for (i = 0; i < 8000; i++) {
3680                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3681                                 break;
3682                         udelay(20);
3683                 }
3684                 if (i == 8000)
3685                         return -ENODEV;
3686         }
3687         return 0;
3688 }
3689
3690 /* tp->lock is held. */
3691 static void tg3_nvram_unlock(struct tg3 *tp)
3692 {
3693         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3694                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3695 }
3696
3697 /* tp->lock is held. */
3698 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3699 {
3700         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3701                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3702
3703         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3704                 switch (kind) {
3705                 case RESET_KIND_INIT:
3706                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3707                                       DRV_STATE_START);
3708                         break;
3709
3710                 case RESET_KIND_SHUTDOWN:
3711                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3712                                       DRV_STATE_UNLOAD);
3713                         break;
3714
3715                 case RESET_KIND_SUSPEND:
3716                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3717                                       DRV_STATE_SUSPEND);
3718                         break;
3719
3720                 default:
3721                         break;
3722                 };
3723         }
3724 }
3725
3726 /* tp->lock is held. */
3727 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3728 {
3729         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3730                 switch (kind) {
3731                 case RESET_KIND_INIT:
3732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3733                                       DRV_STATE_START_DONE);
3734                         break;
3735
3736                 case RESET_KIND_SHUTDOWN:
3737                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3738                                       DRV_STATE_UNLOAD_DONE);
3739                         break;
3740
3741                 default:
3742                         break;
3743                 };
3744         }
3745 }
3746
3747 /* tp->lock is held. */
3748 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3749 {
3750         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3751                 switch (kind) {
3752                 case RESET_KIND_INIT:
3753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3754                                       DRV_STATE_START);
3755                         break;
3756
3757                 case RESET_KIND_SHUTDOWN:
3758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3759                                       DRV_STATE_UNLOAD);
3760                         break;
3761
3762                 case RESET_KIND_SUSPEND:
3763                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3764                                       DRV_STATE_SUSPEND);
3765                         break;
3766
3767                 default:
3768                         break;
3769                 };
3770         }
3771 }
3772
3773 static void tg3_stop_fw(struct tg3 *);
3774
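/* Core chip reset: grab the NVRAM arbitration lock, issue the GRC
 * core-clock reset (with PCI Express and 5705/5750 specific tweaks), wait
 * for the chip to come back, restore PCI configuration state, wait for the
 * bootcode firmware handshake in NIC SRAM, and re-probe the ASF state.
 */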
3775 /* tp->lock is held. */
3776 static int tg3_chip_reset(struct tg3 *tp)
3777 {
3778         u32 val;
3779         u32 flags_save;
3780         int i;
3781
3782         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3783                 tg3_nvram_lock(tp);
3784
3785         /*
3786          * We must avoid the readl() that normally takes place.
3787          * It locks machines, causes machine checks, and does other
3788          * fun things.  So, temporarily disable the 5701
3789          * hardware workaround while we do the reset.
3790          */
3791         flags_save = tp->tg3_flags;
3792         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3793
3794         /* do the reset */
3795         val = GRC_MISC_CFG_CORECLK_RESET;
3796
3797         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3798                 if (tr32(0x7e2c) == 0x60) {
3799                         tw32(0x7e2c, 0x20);
3800                 }
3801                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3802                         tw32(GRC_MISC_CFG, (1 << 29));
3803                         val |= (1 << 29);
3804                 }
3805         }
3806
3807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3808             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3809                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3810         tw32(GRC_MISC_CFG, val);
3811
3812         /* restore 5701 hardware bug workaround flag */
3813         tp->tg3_flags = flags_save;
3814
3815         /* Unfortunately, we have to delay before the PCI read back.
3816          * Some 575X chips will not even respond to a PCI cfg access
3817          * when the reset command is given to the chip.
3818          *
3819          * How do these hardware designers expect things to work
3820          * properly if the PCI write is posted for a long period
3821          * of time?  It is always necessary to have some method by
3822          * which a register read-back can occur to push out the write
3823          * that does the reset.
3824          *
3825          * For most tg3 variants the trick below was working.
3826          * Ho hum...
3827          */
3828         udelay(120);
3829
3830         /* Flush PCI posted writes.  The normal MMIO registers
3831          * are inaccessible at this time so this is the only
3832          * way to do this reliably (actually, this is no longer
3833          * the case, see above).  I tried to use indirect
3834          * register read/write but this upset some 5701 variants.
3835          */
3836         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3837
3838         udelay(120);
3839
3840         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3841                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3842                         int i;
3843                         u32 cfg_val;
3844
3845                         /* Wait for link training to complete.  */
3846                         for (i = 0; i < 5000; i++)
3847                                 udelay(100);
3848
3849                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3850                         pci_write_config_dword(tp->pdev, 0xc4,
3851                                                cfg_val | (1 << 15));
3852                 }
3853                 /* Set PCIE max payload size and clear error status.  */
3854                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3855         }
3856
3857         /* Re-enable indirect register accesses. */
3858         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3859                                tp->misc_host_ctrl);
3860
3861         /* Set MAX PCI retry to zero. */
3862         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3863         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3864             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3865                 val |= PCISTATE_RETRY_SAME_DMA;
3866         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3867
3868         pci_restore_state(tp->pdev);
3869
3870         /* Make sure PCI-X relaxed ordering bit is clear. */
3871         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3872         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3873         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3874
3875         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3876
3877         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3878                 tg3_stop_fw(tp);
3879                 tw32(0x5000, 0x400);
3880         }
3881
3882         tw32(GRC_MODE, tp->grc_mode);
3883
3884         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3885                 u32 val = tr32(0xc4);
3886
3887                 tw32(0xc4, val | (1 << 15));
3888         }
3889
3890         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3891             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3892                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3893                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3894                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3895                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3896         }
3897
3898         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3899                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3900                 tw32_f(MAC_MODE, tp->mac_mode);
3901         } else
3902                 tw32_f(MAC_MODE, 0);
3903         udelay(40);
3904
3905         /* Wait for firmware initialization to complete. */
3906         for (i = 0; i < 100000; i++) {
3907                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3908                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3909                         break;
3910                 udelay(10);
3911         }
3912         if (i >= 100000 &&
3913             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3914                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3915                        "firmware will not restart magic=%08x\n",
3916                        tp->dev->name, val);
3917                 return -ENODEV;
3918         }
3919
3920         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3921             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3922                 u32 val = tr32(0x7c00);
3923
3924                 tw32(0x7c00, val | (1 << 25));
3925         }
3926
3927         /* Reprobe ASF enable state.  */
3928         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3929         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3930         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3931         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3932                 u32 nic_cfg;
3933
3934                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3935                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3936                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3937                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3938                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3939                 }
3940         }
3941
3942         return 0;
3943 }
3944
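/* If ASF is enabled, ask the management firmware to pause: post
 * FWCMD_NICDRV_PAUSE_FW in the firmware command mailbox, set the RX CPU
 * event bit, and briefly wait for the firmware to acknowledge it.
 */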
3945 /* tp->lock is held. */
3946 static void tg3_stop_fw(struct tg3 *tp)
3947 {
3948         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3949                 u32 val;
3950                 int i;
3951
3952                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3953                 val = tr32(GRC_RX_CPU_EVENT);
3954                 val |= (1 << 14);
3955                 tw32(GRC_RX_CPU_EVENT, val);
3956
3957                 /* Wait for RX cpu to ACK the event.  */
3958                 for (i = 0; i < 100; i++) {
3959                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3960                                 break;
3961                         udelay(1);
3962                 }
3963         }
3964 }
3965
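/* Full shutdown: pause the management firmware, post the shutdown
 * signature, quiesce DMA activity with tg3_abort_hw(), reset the chip, and
 * post the legacy and new-style shutdown completion signatures.
 */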
3966 /* tp->lock is held. */
3967 static int tg3_halt(struct tg3 *tp)
3968 {
3969         int err;
3970
3971         tg3_stop_fw(tp);
3972
3973         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3974
3975         tg3_abort_hw(tp);
3976         err = tg3_chip_reset(tp);
3977
3978         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3979         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3980
3981         if (err)
3982                 return err;
3983
3984         return 0;
3985 }
3986
3987 #define TG3_FW_RELEASE_MAJOR    0x0
3988 #define TG3_FW_RELASE_MINOR     0x0
3989 #define TG3_FW_RELEASE_FIX      0x0
3990 #define TG3_FW_START_ADDR       0x08000000
3991 #define TG3_FW_TEXT_ADDR        0x08000000
3992 #define TG3_FW_TEXT_LEN         0x9c0
3993 #define TG3_FW_RODATA_ADDR      0x080009c0
3994 #define TG3_FW_RODATA_LEN       0x60
3995 #define TG3_FW_DATA_ADDR        0x08000a40
3996 #define TG3_FW_DATA_LEN         0x20
3997 #define TG3_FW_SBSS_ADDR        0x08000a60
3998 #define TG3_FW_SBSS_LEN         0xc
3999 #define TG3_FW_BSS_ADDR         0x08000a70
4000 #define TG3_FW_BSS_LEN          0x10
4001
4002 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4003         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4004         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4005         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4006         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4007         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4008         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4009         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4010         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4011         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4012         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4013         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4014         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4015         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4016         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4017         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4018         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4019         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4020         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4021         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4022         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4023         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4024         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4025         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4026         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4027         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4028         0, 0, 0, 0, 0, 0,
4029         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4030         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4031         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4032         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4033         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4034         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4035         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4036         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4037         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4038         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4039         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4040         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4041         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4042         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4043         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4044         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4045         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4046         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4047         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4048         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4049         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4050         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4051         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4052         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4053         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4054         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4055         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4056         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4057         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4058         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4059         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4060         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4061         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4062         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4063         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4064         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4065         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4066         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4067         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4068         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4069         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4070         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4071         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4072         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4073         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4074         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4075         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4076         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4077         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4078         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4079         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4080         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4081         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4082         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4083         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4084         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4085         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4086         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4087         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4088         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4089         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4090         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4091         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4092         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4093         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4094 };
4095
4096 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4097         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4098         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4099         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4100         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4101         0x00000000
4102 };
4103
4104 #if 0 /* All zeros, don't eat up space with it. */
4105 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4106         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4107         0x00000000, 0x00000000, 0x00000000, 0x00000000
4108 };
4109 #endif
4110
4111 #define RX_CPU_SCRATCH_BASE     0x30000
4112 #define RX_CPU_SCRATCH_SIZE     0x04000
4113 #define TX_CPU_SCRATCH_BASE     0x34000
4114 #define TX_CPU_SCRATCH_SIZE     0x04000
4115
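/* Each on-chip CPU gets a 16 kB (0x4000) scratch window.  The loader
 * below zeroes the whole window and then copies the firmware text,
 * rodata and data sections into it before the CPU is released from
 * halt.  Note that the 5705 has no separate TX CPU, which is why
 * tg3_halt_cpu() and tg3_load_firmware_cpu() refuse TX_CPU_BASE there.
 */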
4116 /* tp->lock is held. */
4117 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4118 {
4119         int i;
4120
4121         if (offset == TX_CPU_BASE &&
4122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4123                 BUG();
4124
4125         if (offset == RX_CPU_BASE) {
4126                 for (i = 0; i < 10000; i++) {
4127                         tw32(offset + CPU_STATE, 0xffffffff);
4128                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4129                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4130                                 break;
4131                 }
4132
4133                 tw32(offset + CPU_STATE, 0xffffffff);
4134                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4135                 udelay(10);
4136         } else {
4137                 for (i = 0; i < 10000; i++) {
4138                         tw32(offset + CPU_STATE, 0xffffffff);
4139                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4140                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4141                                 break;
4142                 }
4143         }
4144
4145         if (i >= 10000) {
4146                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4147                        "%s CPU\n",
4148                        tp->dev->name,
4149                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4150                 return -ENODEV;
4151         }
4152         return 0;
4153 }
4154
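/* Describes one firmware image: each section (text, rodata, data) has a
 * target load address, a length in bytes, and a pointer to its contents
 * on the host.  A NULL pointer means the section is written out as
 * zeroes (see tg3_load_firmware_cpu() below).
 */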
4155 struct fw_info {
4156         unsigned int text_base;
4157         unsigned int text_len;
4158         u32 *text_data;
4159         unsigned int rodata_base;
4160         unsigned int rodata_len;
4161         u32 *rodata_data;
4162         unsigned int data_base;
4163         unsigned int data_len;
4164         u32 *data_data;
4165 };
4166
4167 /* tp->lock is held. */
4168 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4169                                  int cpu_scratch_size, struct fw_info *info)
4170 {
4171         int err, i;
4172         u32 orig_tg3_flags = tp->tg3_flags;
4173         void (*write_op)(struct tg3 *, u32, u32);
4174
4175         if (cpu_base == TX_CPU_BASE &&
4176             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4177                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4178                        "TX CPU firmware on %s, which is a 5705.\n",
4179                        tp->dev->name);
4180                 return -EINVAL;
4181         }
4182
4183         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4184                 write_op = tg3_write_mem;
4185         else
4186                 write_op = tg3_write_indirect_reg32;
4187
4188         /* Force use of PCI config space for indirect register
4189          * write calls.
4190          */
4191         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4192
4193         err = tg3_halt_cpu(tp, cpu_base);
4194         if (err)
4195                 goto out;
4196
4197         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4198                 write_op(tp, cpu_scratch_base + i, 0);
4199         tw32(cpu_base + CPU_STATE, 0xffffffff);
4200         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4201         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4202                 write_op(tp, (cpu_scratch_base +
4203                               (info->text_base & 0xffff) +
4204                               (i * sizeof(u32))),
4205                          (info->text_data ?
4206                           info->text_data[i] : 0));
4207         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4208                 write_op(tp, (cpu_scratch_base +
4209                               (info->rodata_base & 0xffff) +
4210                               (i * sizeof(u32))),
4211                          (info->rodata_data ?
4212                           info->rodata_data[i] : 0));
4213         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4214                 write_op(tp, (cpu_scratch_base +
4215                               (info->data_base & 0xffff) +
4216                               (i * sizeof(u32))),
4217                          (info->data_data ?
4218                           info->data_data[i] : 0));
4219
4220         err = 0;
4221
4222 out:
4223         tp->tg3_flags = orig_tg3_flags;
4224         return err;
4225 }
4226
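/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then release only the RX CPU from halt; the TX CPU is
 * left halted after its image has been loaded.
 */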
4227 /* tp->lock is held. */
4228 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4229 {
4230         struct fw_info info;
4231         int err, i;
4232
4233         info.text_base = TG3_FW_TEXT_ADDR;
4234         info.text_len = TG3_FW_TEXT_LEN;
4235         info.text_data = &tg3FwText[0];
4236         info.rodata_base = TG3_FW_RODATA_ADDR;
4237         info.rodata_len = TG3_FW_RODATA_LEN;
4238         info.rodata_data = &tg3FwRodata[0];
4239         info.data_base = TG3_FW_DATA_ADDR;
4240         info.data_len = TG3_FW_DATA_LEN;
4241         info.data_data = NULL;
4242
4243         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4244                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4245                                     &info);
4246         if (err)
4247                 return err;
4248
4249         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4250                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4251                                     &info);
4252         if (err)
4253                 return err;
4254
4255         /* Now start up only the RX CPU. */
4256         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4257         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4258
4259         for (i = 0; i < 5; i++) {
4260                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4261                         break;
4262                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4263                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4264                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4265                 udelay(1000);
4266         }
4267         if (i >= 5) {
4268                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4269                        "to set RX CPU PC: is %08x, should be %08x\n",
4270                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4271                        TG3_FW_TEXT_ADDR);
4272                 return -ENODEV;
4273         }
4274         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4275         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4276
4277         return 0;
4278 }
4279
4280 #if TG3_TSO_SUPPORT != 0
4281
4282 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4283 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4284 #define TG3_TSO_FW_RELEASE_FIX          0x0
4285 #define TG3_TSO_FW_START_ADDR           0x08000000
4286 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4287 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4288 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4289 #define TG3_TSO_FW_RODATA_LEN           0x60
4290 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4291 #define TG3_TSO_FW_DATA_LEN             0x30
4292 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4293 #define TG3_TSO_FW_SBSS_LEN             0x2c
4294 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4295 #define TG3_TSO_FW_BSS_LEN              0x894
4296
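/* Layout of the 5700-class TSO firmware image, per the defines above:
 * text at 0x08000000 (0x1aa0 bytes), rodata at 0x08001aa0 (0x60 bytes)
 * and data at 0x08001b20 (0x30 bytes).  Only the low 16 bits of each
 * address are used as the offset into the CPU scratch area (note the
 * "& 0xffff" in tg3_load_firmware_cpu()).
 */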
4297 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4298         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4299         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4300         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4301         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4302         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4303         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4304         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4305         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4306         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4307         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4308         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4309         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4310         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4311         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4312         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4313         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4314         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4315         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4316         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4317         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4318         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4319         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4320         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4321         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4322         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4323         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4324         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4325         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4326         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4327         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4328         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4329         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4330         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4331         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4332         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4333         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4334         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4335         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4336         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4337         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4338         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4339         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4340         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4341         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4342         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4343         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4344         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4345         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4346         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4347         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4348         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4349         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4350         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4351         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4352         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4353         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4354         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4355         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4356         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4357         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4358         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4359         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4360         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4361         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4362         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4363         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4364         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4365         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4366         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4367         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4368         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4369         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4370         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4371         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4372         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4373         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4374         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4375         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4376         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4377         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4378         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4379         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4380         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4381         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4382         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4383         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4384         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4385         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4386         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4387         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4388         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4389         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4390         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4391         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4392         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4393         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4394         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4395         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4396         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4397         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4398         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4399         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4400         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4401         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4402         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4403         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4404         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4405         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4406         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4407         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4408         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4409         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4410         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4411         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4412         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4413         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4414         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4415         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4416         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4417         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4418         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4419         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4420         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4421         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4422         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4423         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4424         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4425         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4426         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4427         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4428         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4429         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4430         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4431         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4432         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4433         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4434         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4435         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4436         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4437         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4438         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4439         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4440         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4441         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4442         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4443         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4444         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4445         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4446         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4447         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4448         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4449         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4450         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4451         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4452         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4453         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4454         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4455         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4456         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4457         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4458         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4459         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4460         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4461         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4462         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4463         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4464         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4465         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4466         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4467         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4468         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4469         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4470         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4471         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4472         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4473         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4474         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4475         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4476         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4477         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4478         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4479         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4480         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4481         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4482         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4483         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4484         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4485         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4486         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4487         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4488         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4489         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4490         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4491         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4492         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4493         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4494         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4495         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4496         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4497         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4498         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4499         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4500         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4501         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4502         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4503         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4504         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4505         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4506         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4507         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4508         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4509         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4510         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4511         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4512         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4513         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4514         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4515         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4516         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4517         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4518         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4519         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4520         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4521         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4522         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4523         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4524         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4525         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4526         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4527         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4528         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4529         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4530         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4531         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4532         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4533         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4534         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4535         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4536         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4537         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4538         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4539         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4540         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4541         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4542         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4543         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4544         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4545         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4546         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4547         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4548         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4549         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4550         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4551         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4552         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4553         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4554         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4555         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4556         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4557         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4558         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4559         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4560         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4561         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4562         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4563         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4564         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4565         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4566         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4567         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4568         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4569         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4570         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4571         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4572         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4573         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4574         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4575         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4576         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4577         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4578         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4579         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4580         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4581         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4582 };
4583
4584 static u32 tg3TsoFwRodata[] = {
4585         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4586         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4587         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4588         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4589         0x00000000,
4590 };
4591
4592 static u32 tg3TsoFwData[] = {
4593         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4594         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4595         0x00000000,
4596 };
4597
4598 /* 5705 needs a special version of the TSO firmware.  */
4599 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4600 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4601 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4602 #define TG3_TSO5_FW_START_ADDR          0x00010000
4603 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4604 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4605 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4606 #define TG3_TSO5_FW_RODATA_LEN          0x50
4607 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4608 #define TG3_TSO5_FW_DATA_LEN            0x20
4609 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4610 #define TG3_TSO5_FW_SBSS_LEN            0x28
4611 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4612 #define TG3_TSO5_FW_BSS_LEN             0x88
4613
4614 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4615         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4616         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4617         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4618         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4619         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4620         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4621         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4622         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4623         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4624         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4625         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4626         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4627         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4628         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4629         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4630         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4631         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4632         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4633         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4634         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4635         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4636         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4637         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4638         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4639         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4640         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4641         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4642         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4643         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4644         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4645         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4646         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4647         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4648         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4649         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4650         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4651         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4652         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4653         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4654         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4655         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4656         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4657         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4658         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4659         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4660         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4661         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4662         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4663         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4664         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4665         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4666         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4667         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4668         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4669         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4670         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4671         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4672         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4673         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4674         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4675         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4676         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4677         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4678         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4679         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4680         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4681         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4682         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4683         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4684         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4685         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4686         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4687         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4688         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4689         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4690         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4691         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4692         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4693         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4694         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4695         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4696         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4697         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4698         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4699         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4700         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4701         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4702         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4703         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4704         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4705         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4706         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4707         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4708         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4709         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4710         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4711         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4712         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4713         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4714         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4715         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4716         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4717         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4718         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4719         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4720         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4721         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4722         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4723         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4724         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4725         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4726         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4727         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4728         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4729         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4730         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4731         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4732         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4733         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4734         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4735         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4736         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4737         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4738         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4739         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4740         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4741         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4742         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4743         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4744         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4745         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4746         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4747         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4748         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4749         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4750         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4751         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4752         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4753         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4754         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4755         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4756         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4757         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4758         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4759         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4760         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4761         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4762         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4763         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4764         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4765         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4766         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4767         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4768         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4769         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4770         0x00000000, 0x00000000, 0x00000000,
4771 };
4772
4773 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4774         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4775         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4776         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4777         0x00000000, 0x00000000, 0x00000000,
4778 };
4779
4780 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4781         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4782         0x00000000, 0x00000000, 0x00000000,
4783 };
4784
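/* Pick and load the TSO firmware variant that matches the chip: the
 * 5705 image runs on the RX CPU and is staged in the mbuf pool SRAM
 * (the 5705 has no TX CPU), while all other chips load the larger image
 * into the TX CPU scratch area.  Chips with TSO in hardware
 * (TG3_FLG2_HW_TSO) skip the download entirely.
 */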
4785 /* tp->lock is held. */
4786 static int tg3_load_tso_firmware(struct tg3 *tp)
4787 {
4788         struct fw_info info;
4789         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4790         int err, i;
4791
4792         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4793                 return 0;
4794
4795         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4796                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4797                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4798                 info.text_data = &tg3Tso5FwText[0];
4799                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4800                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4801                 info.rodata_data = &tg3Tso5FwRodata[0];
4802                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4803                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4804                 info.data_data = &tg3Tso5FwData[0];
4805                 cpu_base = RX_CPU_BASE;
4806                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4807                 cpu_scratch_size = (info.text_len +
4808                                     info.rodata_len +
4809                                     info.data_len +
4810                                     TG3_TSO5_FW_SBSS_LEN +
4811                                     TG3_TSO5_FW_BSS_LEN);
4812         } else {
4813                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4814                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4815                 info.text_data = &tg3TsoFwText[0];
4816                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4817                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4818                 info.rodata_data = &tg3TsoFwRodata[0];
4819                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4820                 info.data_len = TG3_TSO_FW_DATA_LEN;
4821                 info.data_data = &tg3TsoFwData[0];
4822                 cpu_base = TX_CPU_BASE;
4823                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4824                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4825         }
4826
4827         err = tg3_load_firmware_cpu(tp, cpu_base,
4828                                     cpu_scratch_base, cpu_scratch_size,
4829                                     &info);
4830         if (err)
4831                 return err;
4832
4833         /* Now start up the CPU. */
4834         tw32(cpu_base + CPU_STATE, 0xffffffff);
4835         tw32_f(cpu_base + CPU_PC,    info.text_base);
4836
4837         for (i = 0; i < 5; i++) {
4838                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4839                         break;
4840                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4841                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4842                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4843                 udelay(1000);
4844         }
4845         if (i >= 5) {
4846                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
4847                        "to set CPU PC: is %08x, should be %08x\n",
4848                        tp->dev->name, tr32(cpu_base + CPU_PC),
4849                        info.text_base);
4850                 return -ENODEV;
4851         }
4852         tw32(cpu_base + CPU_STATE, 0xffffffff);
4853         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4854         return 0;
4855 }
4856
4857 #endif /* TG3_TSO_SUPPORT != 0 */
4858
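/* The 6-byte station address is split across two registers per slot:
 * the high word holds bytes 0-1, the low word bytes 2-5.  For a
 * hypothetical address 00:10:18:aa:bb:cc this gives
 *
 *   addr_high = 0x00000010
 *   addr_low  = 0x18aabbcc
 *
 * The same pair is written to all four MAC_ADDR slots (and to the
 * twelve extended slots on 5703/5704), and the byte sum seeds the TX
 * backoff generator.
 */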
4859 /* tp->lock is held. */
4860 static void __tg3_set_mac_addr(struct tg3 *tp)
4861 {
4862         u32 addr_high, addr_low;
4863         int i;
4864
4865         addr_high = ((tp->dev->dev_addr[0] << 8) |
4866                      tp->dev->dev_addr[1]);
4867         addr_low = ((tp->dev->dev_addr[2] << 24) |
4868                     (tp->dev->dev_addr[3] << 16) |
4869                     (tp->dev->dev_addr[4] <<  8) |
4870                     (tp->dev->dev_addr[5] <<  0));
4871         for (i = 0; i < 4; i++) {
4872                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4873                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4874         }
4875
4876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4877             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4878                 for (i = 0; i < 12; i++) {
4879                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4880                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4881                 }
4882         }
4883
4884         addr_high = (tp->dev->dev_addr[0] +
4885                      tp->dev->dev_addr[1] +
4886                      tp->dev->dev_addr[2] +
4887                      tp->dev->dev_addr[3] +
4888                      tp->dev->dev_addr[4] +
4889                      tp->dev->dev_addr[5]) &
4890                 TX_BACKOFF_SEED_MASK;
4891         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4892 }
4893
4894 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4895 {
4896         struct tg3 *tp = netdev_priv(dev);
4897         struct sockaddr *addr = p;
4898
4899         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4900
4901         spin_lock_irq(&tp->lock);
4902         __tg3_set_mac_addr(tp);
4903         spin_unlock_irq(&tp->lock);
4904
4905         return 0;
4906 }
4907
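/* Write one TG3_BDINFO block in NIC SRAM describing a ring: the 64-bit
 * host DMA address (high word first), the maxlen/flags word and, on
 * chips other than the 5705/5750, the ring's address in NIC local
 * memory.
 */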
4908 /* tp->lock is held. */
4909 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4910                            dma_addr_t mapping, u32 maxlen_flags,
4911                            u32 nic_addr)
4912 {
4913         tg3_write_mem(tp,
4914                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4915                       ((u64) mapping >> 32));
4916         tg3_write_mem(tp,
4917                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4918                       ((u64) mapping & 0xffffffff));
4919         tg3_write_mem(tp,
4920                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4921                        maxlen_flags);
4922
4923         if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
4924             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750))
4925                 tg3_write_mem(tp,
4926                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4927                               nic_addr);
4928 }
4929
4930 static void __tg3_set_rx_mode(struct net_device *);
4931
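/* Full hardware (re)initialization: quiesce interrupts and firmware,
 * reset the chip, then rebuild the rings, DMA control, GRC mode, buffer
 * manager pools and watermarks, and the receive ring BDINFO blocks from
 * scratch.
 */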
4932 /* tp->lock is held. */
4933 static int tg3_reset_hw(struct tg3 *tp)
4934 {
4935         u32 val, rdmac_mode;
4936         int i, err, limit;
4937
4938         tg3_disable_ints(tp);
4939
4940         tg3_stop_fw(tp);
4941
4942         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4943
4944         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4945                 err = tg3_abort_hw(tp);
4946                 if (err)
4947                         return err;
4948         }
4949
4950         err = tg3_chip_reset(tp);
4951         if (err)
4952                 return err;
4953
4954         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4955
4956         /* This works around an issue with Athlon chipsets on
4957          * B3 tigon3 silicon.  This bit has no effect on any
4958          * other revision.  But do not set this on PCI Express
4959          * chips.
4960          */
4961         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4962                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4963         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4964
4965         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4966             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4967                 val = tr32(TG3PCI_PCISTATE);
4968                 val |= PCISTATE_RETRY_SAME_DMA;
4969                 tw32(TG3PCI_PCISTATE, val);
4970         }
4971
4972         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4973                 /* Enable some hw fixes.  */
4974                 val = tr32(TG3PCI_MSI_DATA);
4975                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4976                 tw32(TG3PCI_MSI_DATA, val);
4977         }
4978
4979         /* Descriptor ring init may access the NIC SRAM
4980          * area to set up the TX descriptors, so we
4981          * can only do this after the hardware has been
4982          * successfully reset.
4983          */
4984         tg3_init_rings(tp);
4985
4986         /* This value is determined during the probe-time DMA
4987          * engine test, tg3_test_dma.
4988          */
4989         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4990
4991         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4992                           GRC_MODE_4X_NIC_SEND_RINGS |
4993                           GRC_MODE_NO_TX_PHDR_CSUM |
4994                           GRC_MODE_NO_RX_PHDR_CSUM);
4995         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4996         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4997                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4998         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4999                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5000
5001         tw32(GRC_MODE,
5002              tp->grc_mode |
5003              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5004
5005         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
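        /* With the core clock fixed at 66 MHz, a prescaler value of 65
         * presumably yields a 1 MHz timer tick, assuming the hardware
         * divides by (value + 1): 66 MHz / 66 = 1 MHz.
         */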
5006         val = tr32(GRC_MISC_CFG);
5007         val &= ~0xff;
5008         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5009         tw32(GRC_MISC_CFG, val);
5010
5011         /* Initialize MBUF/DESC pool. */
5012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5013                 /* Do nothing.  */
5014         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5015                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5017                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5018                 else
5019                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5020                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5021                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5022         }
5023 #if TG3_TSO_SUPPORT != 0
5024         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5025                 int fw_len;
5026
5027                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5028                           TG3_TSO5_FW_RODATA_LEN +
5029                           TG3_TSO5_FW_DATA_LEN +
5030                           TG3_TSO5_FW_SBSS_LEN +
5031                           TG3_TSO5_FW_BSS_LEN);
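                /* Round the firmware footprint up to the next 128-byte
                 * boundary before carving it out of the 5705 mbuf pool
                 * below.
                 */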
5032                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5033                 tw32(BUFMGR_MB_POOL_ADDR,
5034                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5035                 tw32(BUFMGR_MB_POOL_SIZE,
5036                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5037         }
5038 #endif
5039
5040         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5041                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5042                      tp->bufmgr_config.mbuf_read_dma_low_water);
5043                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5044                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5045                 tw32(BUFMGR_MB_HIGH_WATER,
5046                      tp->bufmgr_config.mbuf_high_water);
5047         } else {
5048                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5049                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5050                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5051                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5052                 tw32(BUFMGR_MB_HIGH_WATER,
5053                      tp->bufmgr_config.mbuf_high_water_jumbo);
5054         }
5055         tw32(BUFMGR_DMA_LOW_WATER,
5056              tp->bufmgr_config.dma_low_water);
5057         tw32(BUFMGR_DMA_HIGH_WATER,
5058              tp->bufmgr_config.dma_high_water);
5059
5060         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
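        /* Poll for the enable bit to latch: 2000 iterations of 10us,
         * i.e. up to roughly 20ms, before giving up.
         */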
5061         for (i = 0; i < 2000; i++) {
5062                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5063                         break;
5064                 udelay(10);
5065         }
5066         if (i >= 2000) {
5067                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5068                        tp->dev->name);
5069                 return -ENODEV;
5070         }
5071
5072         /* Setup replenish threshold. */
5073         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5074
5075         /* Initialize TG3_BDINFO's at:
5076          *  RCVDBDI_STD_BD:     standard eth size rx ring
5077          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5078          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5079          *
5080          * like so:
5081          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5082          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5083          *                              ring attribute flags
5084          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5085          *
5086          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5087          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5088          *
5089          * The size of each ring is fixed in the firmware, but the location is
5090          * configurable.
5091          */
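        /* Each TG3_BDINFO appears to be four 32-bit words in NIC SRAM:
         * the host ring DMA address (high word, then low word), the
         * maxlen/flags word, and the NIC SRAM address of the ring itself;
         * tg3_dump_state() below reads them back at offsets 0x0/0x4/0x8/0xc.
         */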
5092         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5093              ((u64) tp->rx_std_mapping >> 32));
5094         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5095              ((u64) tp->rx_std_mapping & 0xffffffff));
5096         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5097              NIC_SRAM_RX_BUFFER_DESC);
5098
5099         /* Don't even try to program the JUMBO/MINI buffer descriptor
5100          * configs on the 5705 and 5750.
5101          */
5102         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5104                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5105                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5106         } else {
5107                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5108                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5109
5110                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5111                      BDINFO_FLAGS_DISABLED);
5112
5113                 /* Setup replenish threshold. */
5114                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5115
5116                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5117                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5118                              ((u64) tp->rx_jumbo_mapping >> 32));
5119                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5120                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5121                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5122                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5123                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5124                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5125                 } else {
5126                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5127                              BDINFO_FLAGS_DISABLED);
5128                 }
5129
5130         }
5131
5132         /* There is only one send ring on the 5705/5750; no need to explicitly
5133          * disable the others.
5134          */
5135         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5137                 /* Clear out send RCB ring in SRAM. */
5138                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5139                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5140                                       BDINFO_FLAGS_DISABLED);
5141         }
5142
5143         tp->tx_prod = 0;
5144         tp->tx_cons = 0;
5145         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5146         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5147
5148         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5149                        tp->tx_desc_mapping,
5150                        (TG3_TX_RING_SIZE <<
5151                         BDINFO_FLAGS_MAXLEN_SHIFT),
5152                        NIC_SRAM_TX_BUFFER_DESC);
5153
5154         /* There is only one receive return ring on the 5705/5750; no need
5155          * to explicitly disable the others.
5156          */
5157         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5158             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5159                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5160                      i += TG3_BDINFO_SIZE) {
5161                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5162                                       BDINFO_FLAGS_DISABLED);
5163                 }
5164         }
5165
5166         tp->rx_rcb_ptr = 0;
5167         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5168
5169         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5170                        tp->rx_rcb_mapping,
5171                        (TG3_RX_RCB_RING_SIZE(tp) <<
5172                         BDINFO_FLAGS_MAXLEN_SHIFT),
5173                        0);
5174
5175         tp->rx_std_ptr = tp->rx_pending;
5176         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5177                      tp->rx_std_ptr);
5178
5179         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5180                                                 tp->rx_jumbo_pending : 0;
5181         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5182                      tp->rx_jumbo_ptr);
5183
5184         /* Initialize MAC address and backoff seed. */
5185         __tg3_set_mac_addr(tp);
5186
5187         /* MTU + ethernet header + FCS + optional VLAN tag */
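        /* e.g. a 1500 byte MTU gives 1500 + 14 + 4 + 4 = 1522, the largest
         * legal VLAN-tagged frame.
         */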
5188         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5189
5190         /* The slot time is changed by tg3_setup_phy if we
5191          * run at gigabit with half duplex.
5192          */
5193         tw32(MAC_TX_LENGTHS,
5194              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5195              (6 << TX_LENGTHS_IPG_SHIFT) |
5196              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5197
5198         /* Receive rules. */
5199         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5200         tw32(RCVLPC_CONFIG, 0x0181);
5201
5202         /* Calculate the RDMAC_MODE setting early; we need it to determine
5203          * the RCVLPC_STATS_ENABLE mask.
5204          */
5205         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5206                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5207                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5208                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5209                       RDMAC_MODE_LNGREAD_ENAB);
5210         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5211                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5212         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5213              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5214             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5215                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5216                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5217                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5218                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5219                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5220                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5221                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5222                 }
5223         }
5224
5225 #if TG3_TSO_SUPPORT != 0
5226         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5227                 rdmac_mode |= (1 << 27);
5228 #endif
5229
5230         /* Receive/send statistics. */
5231         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5232             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5233                 val = tr32(RCVLPC_STATS_ENABLE);
5234                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5235                 tw32(RCVLPC_STATS_ENABLE, val);
5236         } else {
5237                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5238         }
5239         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5240         tw32(SNDDATAI_STATSENAB, 0xffffff);
5241         tw32(SNDDATAI_STATSCTRL,
5242              (SNDDATAI_SCTRL_ENABLE |
5243               SNDDATAI_SCTRL_FASTUPD));
5244
5245         /* Setup host coalescing engine. */
5246         tw32(HOSTCC_MODE, 0);
5247         for (i = 0; i < 2000; i++) {
5248                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5249                         break;
5250                 udelay(10);
5251         }
5252
5253         tw32(HOSTCC_RXCOL_TICKS, 0);
5254         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5255         tw32(HOSTCC_RXMAX_FRAMES, 1);
5256         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5257         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5258             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5259                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5260                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5261         }
5262         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5263         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5264
5265         /* set status block DMA address */
5266         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5267              ((u64) tp->status_mapping >> 32));
5268         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5269              ((u64) tp->status_mapping & 0xffffffff));
5270
5271         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5272             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5273                 /* Status/statistics block address.  See tg3_timer,
5274                  * the tg3_periodic_fetch_stats call there, and
5275                  * tg3_get_stats to see how this works for 5705/5750 chips.
5276                  */
5277                 tw32(HOSTCC_STAT_COAL_TICKS,
5278                      DEFAULT_STAT_COAL_TICKS);
5279                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5280                      ((u64) tp->stats_mapping >> 32));
5281                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5282                      ((u64) tp->stats_mapping & 0xffffffff));
5283                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5284                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5285         }
5286
5287         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5288
5289         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5290         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5291         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5292             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5293                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5294
5295         /* Clear statistics/status block in chip, and status block in ram. */
5296         for (i = NIC_SRAM_STATS_BLK;
5297              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5298              i += sizeof(u32)) {
5299                 tg3_write_mem(tp, i, 0);
5300                 udelay(40);
5301         }
5302         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5303
5304         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5305                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5306         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5307         udelay(40);
5308
5309         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5311                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5312                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5313         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5314         udelay(100);
5315
5316         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5317         tr32(MAILBOX_INTERRUPT_0);
5318
5319         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5320             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5321                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5322                 udelay(40);
5323         }
5324
5325         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5326                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5327                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5328                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5329                WDMAC_MODE_LNGREAD_ENAB);
5330
5331         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5332              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5333             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5334                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5335                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5336                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5337                         /* nothing */
5338                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5339                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5340                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5341                         val |= WDMAC_MODE_RX_ACCEL;
5342                 }
5343         }
5344
5345         tw32_f(WDMAC_MODE, val);
5346         udelay(40);
5347
5348         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5349                 val = tr32(TG3PCI_X_CAPS);
5350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5351                         val &= ~PCIX_CAPS_BURST_MASK;
5352                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5353                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5354                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5355                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5356                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5357                                 val |= (tp->split_mode_max_reqs <<
5358                                         PCIX_CAPS_SPLIT_SHIFT);
5359                 }
5360                 tw32(TG3PCI_X_CAPS, val);
5361         }
5362
5363         tw32_f(RDMAC_MODE, rdmac_mode);
5364         udelay(40);
5365
5366         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5367         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5368             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5369                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5370         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5371         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5372         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5373         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5374         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5375 #if TG3_TSO_SUPPORT != 0
5376         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5377                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5378 #endif
5379         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5380         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5381
5382         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5383                 err = tg3_load_5701_a0_firmware_fix(tp);
5384                 if (err)
5385                         return err;
5386         }
5387
5388 #if TG3_TSO_SUPPORT != 0
5389         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5390                 err = tg3_load_tso_firmware(tp);
5391                 if (err)
5392                         return err;
5393         }
5394 #endif
5395
5396         tp->tx_mode = TX_MODE_ENABLE;
5397         tw32_f(MAC_TX_MODE, tp->tx_mode);
5398         udelay(100);
5399
5400         tp->rx_mode = RX_MODE_ENABLE;
5401         tw32_f(MAC_RX_MODE, tp->rx_mode);
5402         udelay(10);
5403
5404         if (tp->link_config.phy_is_low_power) {
5405                 tp->link_config.phy_is_low_power = 0;
5406                 tp->link_config.speed = tp->link_config.orig_speed;
5407                 tp->link_config.duplex = tp->link_config.orig_duplex;
5408                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5409         }
5410
5411         tp->mi_mode = MAC_MI_MODE_BASE;
5412         tw32_f(MAC_MI_MODE, tp->mi_mode);
5413         udelay(80);
5414
5415         tw32(MAC_LED_CTRL, tp->led_ctrl);
5416
5417         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5418         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5419                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5420                 udelay(10);
5421         }
5422         tw32_f(MAC_RX_MODE, tp->rx_mode);
5423         udelay(10);
5424
5425         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5426                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5427                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5428                         /* Set drive transmission level to 1.2V  */
5429                         /* only if the signal pre-emphasis bit is not set  */
5430                         val = tr32(MAC_SERDES_CFG);
5431                         val &= 0xfffff000;
5432                         val |= 0x880;
5433                         tw32(MAC_SERDES_CFG, val);
5434                 }
5435                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5436                         tw32(MAC_SERDES_CFG, 0x616000);
5437         }
5438
5439         /* Prevent chip from dropping frames when flow control
5440          * is enabled.
5441          */
5442         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5443
5444         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5445             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5446                 /* Use hardware link auto-negotiation */
5447                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5448         }
5449
5450         err = tg3_setup_phy(tp, 1);
5451         if (err)
5452                 return err;
5453
5454         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5455                 u32 tmp;
5456
5457                 /* Clear CRC stats. */
5458                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5459                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5460                         tg3_readphy(tp, 0x14, &tmp);
5461                 }
5462         }
5463
5464         __tg3_set_rx_mode(tp->dev);
5465
5466         /* Initialize receive rules. */
5467         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5468         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5469         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5470         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5471
5472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5474                 limit = 8;
5475         else
5476                 limit = 16;
5477         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5478                 limit -= 4;
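        /* Zero out every rule above the limit.  The switch below falls
         * through on purpose; rules 0 and 1 were programmed above, and
         * rules 2 and 3 are deliberately left untouched (note the
         * commented-out cases).
         */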
5479         switch (limit) {
5480         case 16:
5481                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5482         case 15:
5483                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5484         case 14:
5485                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5486         case 13:
5487                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5488         case 12:
5489                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5490         case 11:
5491                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5492         case 10:
5493                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5494         case 9:
5495                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5496         case 8:
5497                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5498         case 7:
5499                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5500         case 6:
5501                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5502         case 5:
5503                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5504         case 4:
5505                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5506         case 3:
5507                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5508         case 2:
5509         case 1:
5510
5511         default:
5512                 break;
5513         }
5514
5515         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5516
5517         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5518                 tg3_enable_ints(tp);
5519
5520         return 0;
5521 }
5522
5523 /* Called at device open time to get the chip ready for
5524  * packet processing.  Invoked with tp->lock held.
5525  */
5526 static int tg3_init_hw(struct tg3 *tp)
5527 {
5528         int err;
5529
5530         /* Force the chip into D0. */
5531         err = tg3_set_power_state(tp, 0);
5532         if (err)
5533                 goto out;
5534
5535         tg3_switch_clocks(tp);
5536
5537         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5538
5539         err = tg3_reset_hw(tp);
5540
5541 out:
5542         return err;
5543 }
5544
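/* Fold a 32-bit hardware statistics register into a 64-bit {high,low}
 * software counter: add the value to the low word and, if the addition
 * wrapped, carry one into the high word.
 */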
5545 #define TG3_STAT_ADD32(PSTAT, REG) \
5546 do {    u32 __val = tr32(REG); \
5547         (PSTAT)->low += __val; \
5548         if ((PSTAT)->low < __val) \
5549                 (PSTAT)->high += 1; \
5550 } while (0)
5551
5552 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5553 {
5554         struct tg3_hw_stats *sp = tp->hw_stats;
5555
5556         if (!netif_carrier_ok(tp->dev))
5557                 return;
5558
5559         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5560         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5561         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5562         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5563         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5564         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5565         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5566         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5567         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5568         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5569         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5570         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5571         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5572
5573         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5574         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5575         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5576         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5577         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5578         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5579         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5580         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5581         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5582         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5583         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5584         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5585         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5586         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5587 }
5588
5589 static void tg3_timer(unsigned long __opaque)
5590 {
5591         struct tg3 *tp = (struct tg3 *) __opaque;
5592         unsigned long flags;
5593
5594         spin_lock_irqsave(&tp->lock, flags);
5595         spin_lock(&tp->tx_lock);
5596
5597         /* All of this garbage is here because, when using non-tagged
5598          * IRQ status, the mailbox/status_block protocol the chip
5599          * uses with the cpu is race prone.
5600          */
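        /* If the status block was already updated, poke GRC_LCLCTRL_SETINT
         * so the interrupt handler runs again; otherwise force the
         * coalescing engine to deliver a fresh status block right now.
         */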
5601         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5602                 tw32(GRC_LOCAL_CTRL,
5603                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5604         } else {
5605                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5606                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5607         }
5608
5609         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5610                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5611                 spin_unlock(&tp->tx_lock);
5612                 spin_unlock_irqrestore(&tp->lock, flags);
5613                 schedule_work(&tp->reset_task);
5614                 return;
5615         }
5616
5617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5618             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5619                 tg3_periodic_fetch_stats(tp);
5620
5621         /* This part only runs once per second. */
5622         if (!--tp->timer_counter) {
5623                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5624                         u32 mac_stat;
5625                         int phy_event;
5626
5627                         mac_stat = tr32(MAC_STATUS);
5628
5629                         phy_event = 0;
5630                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5631                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5632                                         phy_event = 1;
5633                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5634                                 phy_event = 1;
5635
5636                         if (phy_event)
5637                                 tg3_setup_phy(tp, 0);
5638                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5639                         u32 mac_stat = tr32(MAC_STATUS);
5640                         int need_setup = 0;
5641
5642                         if (netif_carrier_ok(tp->dev) &&
5643                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5644                                 need_setup = 1;
5645                         }
5646                         if (!netif_carrier_ok(tp->dev) &&
5647                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5648                                          MAC_STATUS_SIGNAL_DET))) {
5649                                 need_setup = 1;
5650                         }
5651                         if (need_setup) {
5652                                 tw32_f(MAC_MODE,
5653                                      (tp->mac_mode &
5654                                       ~MAC_MODE_PORT_MODE_MASK));
5655                                 udelay(40);
5656                                 tw32_f(MAC_MODE, tp->mac_mode);
5657                                 udelay(40);
5658                                 tg3_setup_phy(tp, 0);
5659                         }
5660                 }
5661
5662                 tp->timer_counter = tp->timer_multiplier;
5663         }
5664
5665         /* Heartbeat is only sent once every 120 seconds.  */
5666         if (!--tp->asf_counter) {
5667                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5668                         u32 val;
5669
5670                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5671                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5672                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5673                         val = tr32(GRC_RX_CPU_EVENT);
5674                         val |= (1 << 14);
5675                         tw32(GRC_RX_CPU_EVENT, val);
5676                 }
5677                 tp->asf_counter = tp->asf_multiplier;
5678         }
5679
5680         spin_unlock(&tp->tx_lock);
5681         spin_unlock_irqrestore(&tp->lock, flags);
5682
5683         tp->timer.expires = jiffies + tp->timer_offset;
5684         add_timer(&tp->timer);
5685 }
5686
5687 static int tg3_open(struct net_device *dev)
5688 {
5689         struct tg3 *tp = netdev_priv(dev);
5690         int err;
5691
5692         spin_lock_irq(&tp->lock);
5693         spin_lock(&tp->tx_lock);
5694
5695         tg3_disable_ints(tp);
5696         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5697
5698         spin_unlock(&tp->tx_lock);
5699         spin_unlock_irq(&tp->lock);
5700
5701         /* The placement of this call is tied
5702          * to the setup and use of Host TX descriptors.
5703          */
5704         err = tg3_alloc_consistent(tp);
5705         if (err)
5706                 return err;
5707
5708         err = request_irq(dev->irq, tg3_interrupt,
5709                           SA_SHIRQ, dev->name, dev);
5710
5711         if (err) {
5712                 tg3_free_consistent(tp);
5713                 return err;
5714         }
5715
5716         spin_lock_irq(&tp->lock);
5717         spin_lock(&tp->tx_lock);
5718
5719         err = tg3_init_hw(tp);
5720         if (err) {
5721                 tg3_halt(tp);
5722                 tg3_free_rings(tp);
5723         } else {
5724                 tp->timer_offset = HZ / 10;
5725                 tp->timer_counter = tp->timer_multiplier = 10;
5726                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
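                /* The timer fires every HZ/10 jiffies, so a counter of 10
                 * drives the once-per-second work in tg3_timer and
                 * 10 * 120 drives the 120 second ASF heartbeat.
                 */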
5727
5728                 init_timer(&tp->timer);
5729                 tp->timer.expires = jiffies + tp->timer_offset;
5730                 tp->timer.data = (unsigned long) tp;
5731                 tp->timer.function = tg3_timer;
5732                 add_timer(&tp->timer);
5733
5734                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5735         }
5736
5737         spin_unlock(&tp->tx_lock);
5738         spin_unlock_irq(&tp->lock);
5739
5740         if (err) {
5741                 free_irq(dev->irq, dev);
5742                 tg3_free_consistent(tp);
5743                 return err;
5744         }
5745
5746         spin_lock_irq(&tp->lock);
5747         spin_lock(&tp->tx_lock);
5748
5749         tg3_enable_ints(tp);
5750
5751         spin_unlock(&tp->tx_lock);
5752         spin_unlock_irq(&tp->lock);
5753
5754         netif_start_queue(dev);
5755
5756         return 0;
5757 }
5758
5759 #if 0
5760 /*static*/ void tg3_dump_state(struct tg3 *tp)
5761 {
5762         u32 val32, val32_2, val32_3, val32_4, val32_5;
5763         u16 val16;
5764         int i;
5765
5766         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5767         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5768         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5769                val16, val32);
5770
5771         /* MAC block */
5772         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5773                tr32(MAC_MODE), tr32(MAC_STATUS));
5774         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5775                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5776         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5777                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5778         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5779                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5780
5781         /* Send data initiator control block */
5782         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5783                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5784         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5785                tr32(SNDDATAI_STATSCTRL));
5786
5787         /* Send data completion control block */
5788         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5789
5790         /* Send BD ring selector block */
5791         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5792                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5793
5794         /* Send BD initiator control block */
5795         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5796                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5797
5798         /* Send BD completion control block */
5799         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5800
5801         /* Receive list placement control block */
5802         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5803                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5804         printk("       RCVLPC_STATSCTRL[%08x]\n",
5805                tr32(RCVLPC_STATSCTRL));
5806
5807         /* Receive data and receive BD initiator control block */
5808         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5809                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5810
5811         /* Receive data completion control block */
5812         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5813                tr32(RCVDCC_MODE));
5814
5815         /* Receive BD initiator control block */
5816         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5817                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5818
5819         /* Receive BD completion control block */
5820         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5821                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5822
5823         /* Receive list selector control block */
5824         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5825                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5826
5827         /* Mbuf cluster free block */
5828         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5829                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5830
5831         /* Host coalescing control block */
5832         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5833                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5834         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5835                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5836                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5837         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5838                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5839                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5840         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5841                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5842         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5843                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5844
5845         /* Memory arbiter control block */
5846         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5847                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5848
5849         /* Buffer manager control block */
5850         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5851                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5852         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5853                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5854         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5855                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5856                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5857                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5858
5859         /* Read DMA control block */
5860         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5861                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5862
5863         /* Write DMA control block */
5864         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5865                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5866
5867         /* DMA completion block */
5868         printk("DEBUG: DMAC_MODE[%08x]\n",
5869                tr32(DMAC_MODE));
5870
5871         /* GRC block */
5872         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5873                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5874         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5875                tr32(GRC_LOCAL_CTRL));
5876
5877         /* TG3_BDINFOs */
5878         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5879                tr32(RCVDBDI_JUMBO_BD + 0x0),
5880                tr32(RCVDBDI_JUMBO_BD + 0x4),
5881                tr32(RCVDBDI_JUMBO_BD + 0x8),
5882                tr32(RCVDBDI_JUMBO_BD + 0xc));
5883         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5884                tr32(RCVDBDI_STD_BD + 0x0),
5885                tr32(RCVDBDI_STD_BD + 0x4),
5886                tr32(RCVDBDI_STD_BD + 0x8),
5887                tr32(RCVDBDI_STD_BD + 0xc));
5888         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5889                tr32(RCVDBDI_MINI_BD + 0x0),
5890                tr32(RCVDBDI_MINI_BD + 0x4),
5891                tr32(RCVDBDI_MINI_BD + 0x8),
5892                tr32(RCVDBDI_MINI_BD + 0xc));
5893
5894         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5895         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5896         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5897         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5898         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5899                val32, val32_2, val32_3, val32_4);
5900
5901         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5902         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5903         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5904         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5905         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5906                val32, val32_2, val32_3, val32_4);
5907
5908         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5909         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5910         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5911         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5912         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5913         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5914                val32, val32_2, val32_3, val32_4, val32_5);
5915
5916         /* SW status block */
5917         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5918                tp->hw_status->status,
5919                tp->hw_status->status_tag,
5920                tp->hw_status->rx_jumbo_consumer,
5921                tp->hw_status->rx_consumer,
5922                tp->hw_status->rx_mini_consumer,
5923                tp->hw_status->idx[0].rx_producer,
5924                tp->hw_status->idx[0].tx_consumer);
5925
5926         /* SW statistics block */
5927         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5928                ((u32 *)tp->hw_stats)[0],
5929                ((u32 *)tp->hw_stats)[1],
5930                ((u32 *)tp->hw_stats)[2],
5931                ((u32 *)tp->hw_stats)[3]);
5932
5933         /* Mailboxes */
5934         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5935                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5936                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5937                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5938                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5939
5940         /* NIC side send descriptors. */
5941         for (i = 0; i < 6; i++) {
5942                 unsigned long txd;
5943
5944                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5945                         + (i * sizeof(struct tg3_tx_buffer_desc));
5946                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5947                        i,
5948                        readl(txd + 0x0), readl(txd + 0x4),
5949                        readl(txd + 0x8), readl(txd + 0xc));
5950         }
5951
5952         /* NIC side RX descriptors. */
5953         for (i = 0; i < 6; i++) {
5954                 unsigned long rxd;
5955
5956                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5957                         + (i * sizeof(struct tg3_rx_buffer_desc));
5958                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5959                        i,
5960                        readl(rxd + 0x0), readl(rxd + 0x4),
5961                        readl(rxd + 0x8), readl(rxd + 0xc));
5962                 rxd += (4 * sizeof(u32));
5963                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5964                        i,
5965                        readl(rxd + 0x0), readl(rxd + 0x4),
5966                        readl(rxd + 0x8), readl(rxd + 0xc));
5967         }
5968
5969         for (i = 0; i < 6; i++) {
5970                 unsigned long rxd;
5971
5972                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5973                         + (i * sizeof(struct tg3_rx_buffer_desc));
5974                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5975                        i,
5976                        readl(rxd + 0x0), readl(rxd + 0x4),
5977                        readl(rxd + 0x8), readl(rxd + 0xc));
5978                 rxd += (4 * sizeof(u32));
5979                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5980                        i,
5981                        readl(rxd + 0x0), readl(rxd + 0x4),
5982                        readl(rxd + 0x8), readl(rxd + 0xc));
5983         }
5984 }
5985 #endif
5986
5987 static struct net_device_stats *tg3_get_stats(struct net_device *);
5988 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5989
5990 static int tg3_close(struct net_device *dev)
5991 {
5992         struct tg3 *tp = netdev_priv(dev);
5993
5994         netif_stop_queue(dev);
5995
5996         del_timer_sync(&tp->timer);
5997
5998         spin_lock_irq(&tp->lock);
5999         spin_lock(&tp->tx_lock);
6000 #if 0
6001         tg3_dump_state(tp);
6002 #endif
6003
6004         tg3_disable_ints(tp);
6005
6006         tg3_halt(tp);
6007         tg3_free_rings(tp);
6008         tp->tg3_flags &=
6009                 ~(TG3_FLAG_INIT_COMPLETE |
6010                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6011         netif_carrier_off(tp->dev);
6012
6013         spin_unlock(&tp->tx_lock);
6014         spin_unlock_irq(&tp->lock);
6015
6016         free_irq(dev->irq, dev);
6017
6018         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6019                sizeof(tp->net_stats_prev));
6020         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6021                sizeof(tp->estats_prev));
6022
6023         tg3_free_consistent(tp);
6024
6025         return 0;
6026 }
6027
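/* net_device_stats fields are unsigned long, so on 32-bit hosts only the
 * low word of a 64-bit hardware counter can be returned.
 */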
6028 static inline unsigned long get_stat64(tg3_stat64_t *val)
6029 {
6030         unsigned long ret;
6031
6032 #if (BITS_PER_LONG == 32)
6033         ret = val->low;
6034 #else
6035         ret = ((u64)val->high << 32) | ((u64)val->low);
6036 #endif
6037         return ret;
6038 }
6039
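/* On 5700/5701 copper parts the CRC error count is read out of the PHY
 * (what looks like a shadow-register sequence via registers 0x1e and 0x14)
 * and accumulated in software; everything else reports the MAC's
 * rx_fcs_errors statistic.
 */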
6040 static unsigned long calc_crc_errors(struct tg3 *tp)
6041 {
6042         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6043
6044         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6045             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6046              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6047                 unsigned long flags;
6048                 u32 val;
6049
6050                 spin_lock_irqsave(&tp->lock, flags);
6051                 if (!tg3_readphy(tp, 0x1e, &val)) {
6052                         tg3_writephy(tp, 0x1e, val | 0x8000);
6053                         tg3_readphy(tp, 0x14, &val);
6054                 } else
6055                         val = 0;
6056                 spin_unlock_irqrestore(&tp->lock, flags);
6057
6058                 tp->phy_crc_errors += val;
6059
6060                 return tp->phy_crc_errors;
6061         }
6062
6063         return get_stat64(&hw_stats->rx_fcs_errors);
6064 }
6065
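/* Ethtool statistics are reported as the counters saved at the last
 * tg3_close() (estats_prev) plus the live hardware counters, so totals
 * persist across an ifdown/ifup cycle.
 */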
6066 #define ESTAT_ADD(member) \
6067         estats->member =        old_estats->member + \
6068                                 get_stat64(&hw_stats->member)
6069
6070 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6071 {
6072         struct tg3_ethtool_stats *estats = &tp->estats;
6073         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6074         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6075
6076         if (!hw_stats)
6077                 return old_estats;
6078
6079         ESTAT_ADD(rx_octets);
6080         ESTAT_ADD(rx_fragments);
6081         ESTAT_ADD(rx_ucast_packets);
6082         ESTAT_ADD(rx_mcast_packets);
6083         ESTAT_ADD(rx_bcast_packets);
6084         ESTAT_ADD(rx_fcs_errors);
6085         ESTAT_ADD(rx_align_errors);
6086         ESTAT_ADD(rx_xon_pause_rcvd);
6087         ESTAT_ADD(rx_xoff_pause_rcvd);
6088         ESTAT_ADD(rx_mac_ctrl_rcvd);
6089         ESTAT_ADD(rx_xoff_entered);
6090         ESTAT_ADD(rx_frame_too_long_errors);
6091         ESTAT_ADD(rx_jabbers);
6092         ESTAT_ADD(rx_undersize_packets);
6093         ESTAT_ADD(rx_in_length_errors);
6094         ESTAT_ADD(rx_out_length_errors);
6095         ESTAT_ADD(rx_64_or_less_octet_packets);
6096         ESTAT_ADD(rx_65_to_127_octet_packets);
6097         ESTAT_ADD(rx_128_to_255_octet_packets);
6098         ESTAT_ADD(rx_256_to_511_octet_packets);
6099         ESTAT_ADD(rx_512_to_1023_octet_packets);
6100         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6101         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6102         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6103         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6104         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6105
6106         ESTAT_ADD(tx_octets);
6107         ESTAT_ADD(tx_collisions);
6108         ESTAT_ADD(tx_xon_sent);
6109         ESTAT_ADD(tx_xoff_sent);
6110         ESTAT_ADD(tx_flow_control);
6111         ESTAT_ADD(tx_mac_errors);
6112         ESTAT_ADD(tx_single_collisions);
6113         ESTAT_ADD(tx_mult_collisions);
6114         ESTAT_ADD(tx_deferred);
6115         ESTAT_ADD(tx_excessive_collisions);
6116         ESTAT_ADD(tx_late_collisions);
6117         ESTAT_ADD(tx_collide_2times);
6118         ESTAT_ADD(tx_collide_3times);
6119         ESTAT_ADD(tx_collide_4times);
6120         ESTAT_ADD(tx_collide_5times);
6121         ESTAT_ADD(tx_collide_6times);
6122         ESTAT_ADD(tx_collide_7times);
6123         ESTAT_ADD(tx_collide_8times);
6124         ESTAT_ADD(tx_collide_9times);
6125         ESTAT_ADD(tx_collide_10times);
6126         ESTAT_ADD(tx_collide_11times);
6127         ESTAT_ADD(tx_collide_12times);
6128         ESTAT_ADD(tx_collide_13times);
6129         ESTAT_ADD(tx_collide_14times);
6130         ESTAT_ADD(tx_collide_15times);
6131         ESTAT_ADD(tx_ucast_packets);
6132         ESTAT_ADD(tx_mcast_packets);
6133         ESTAT_ADD(tx_bcast_packets);
6134         ESTAT_ADD(tx_carrier_sense_errors);
6135         ESTAT_ADD(tx_discards);
6136         ESTAT_ADD(tx_errors);
6137
6138         ESTAT_ADD(dma_writeq_full);
6139         ESTAT_ADD(dma_write_prioq_full);
6140         ESTAT_ADD(rxbds_empty);
6141         ESTAT_ADD(rx_discards);
6142         ESTAT_ADD(rx_errors);
6143         ESTAT_ADD(rx_threshold_hit);
6144
6145         ESTAT_ADD(dma_readq_full);
6146         ESTAT_ADD(dma_read_prioq_full);
6147         ESTAT_ADD(tx_comp_queue_full);
6148
6149         ESTAT_ADD(ring_set_send_prod_index);
6150         ESTAT_ADD(ring_status_update);
6151         ESTAT_ADD(nic_irqs);
6152         ESTAT_ADD(nic_avoided_irqs);
6153         ESTAT_ADD(nic_tx_threshold_hit);
6154
6155         return estats;
6156 }
6157
6158 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6159 {
6160         struct tg3 *tp = netdev_priv(dev);
6161         struct net_device_stats *stats = &tp->net_stats;
6162         struct net_device_stats *old_stats = &tp->net_stats_prev;
6163         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6164
6165         if (!hw_stats)
6166                 return old_stats;
6167
6168         stats->rx_packets = old_stats->rx_packets +
6169                 get_stat64(&hw_stats->rx_ucast_packets) +
6170                 get_stat64(&hw_stats->rx_mcast_packets) +
6171                 get_stat64(&hw_stats->rx_bcast_packets);
6172
6173         stats->tx_packets = old_stats->tx_packets +
6174                 get_stat64(&hw_stats->tx_ucast_packets) +
6175                 get_stat64(&hw_stats->tx_mcast_packets) +
6176                 get_stat64(&hw_stats->tx_bcast_packets);
6177
6178         stats->rx_bytes = old_stats->rx_bytes +
6179                 get_stat64(&hw_stats->rx_octets);
6180         stats->tx_bytes = old_stats->tx_bytes +
6181                 get_stat64(&hw_stats->tx_octets);
6182
6183         stats->rx_errors = old_stats->rx_errors +
6184                 get_stat64(&hw_stats->rx_errors) +
6185                 get_stat64(&hw_stats->rx_discards);
6186         stats->tx_errors = old_stats->tx_errors +
6187                 get_stat64(&hw_stats->tx_errors) +
6188                 get_stat64(&hw_stats->tx_mac_errors) +
6189                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6190                 get_stat64(&hw_stats->tx_discards);
6191
6192         stats->multicast = old_stats->multicast +
6193                 get_stat64(&hw_stats->rx_mcast_packets);
6194         stats->collisions = old_stats->collisions +
6195                 get_stat64(&hw_stats->tx_collisions);
6196
6197         stats->rx_length_errors = old_stats->rx_length_errors +
6198                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6199                 get_stat64(&hw_stats->rx_undersize_packets);
6200
6201         stats->rx_over_errors = old_stats->rx_over_errors +
6202                 get_stat64(&hw_stats->rxbds_empty);
6203         stats->rx_frame_errors = old_stats->rx_frame_errors +
6204                 get_stat64(&hw_stats->rx_align_errors);
6205         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6206                 get_stat64(&hw_stats->tx_discards);
6207         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6208                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6209
6210         stats->rx_crc_errors = old_stats->rx_crc_errors +
6211                 calc_crc_errors(tp);
6212
6213         return stats;
6214 }
6215
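/* Bit-serial, bit-reflected CRC-32 (polynomial 0xedb88320).  Used below to
 * hash multicast addresses into the 128-bit MAC_HASH_REG_0..3 filter.
 */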
6216 static inline u32 calc_crc(unsigned char *buf, int len)
6217 {
6218         u32 reg;
6219         u32 tmp;
6220         int j, k;
6221
6222         reg = 0xffffffff;
6223
6224         for (j = 0; j < len; j++) {
6225                 reg ^= buf[j];
6226
6227                 for (k = 0; k < 8; k++) {
6228                         tmp = reg & 0x01;
6229
6230                         reg >>= 1;
6231
6232                         if (tmp) {
6233                                 reg ^= 0xedb88320;
6234                         }
6235                 }
6236         }
6237
6238         return ~reg;
6239 }
6240
6241 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6242 {
6243         /* accept or reject all multicast frames */
6244         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6245         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6246         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6247         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6248 }
6249
6250 static void __tg3_set_rx_mode(struct net_device *dev)
6251 {
6252         struct tg3 *tp = netdev_priv(dev);
6253         u32 rx_mode;
6254
6255         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6256                                   RX_MODE_KEEP_VLAN_TAG);
6257
6258         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6259          * flag clear.
6260          */
6261 #if TG3_VLAN_TAG_USED
6262         if (!tp->vlgrp &&
6263             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6264                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6265 #else
6266         /* By definition, VLAN is always disabled in this
6267          * case.
6268          */
6269         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6270                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6271 #endif
6272
6273         if (dev->flags & IFF_PROMISC) {
6274                 /* Promiscuous mode. */
6275                 rx_mode |= RX_MODE_PROMISC;
6276         } else if (dev->flags & IFF_ALLMULTI) {
6277                 /* Accept all multicast. */
6278                 tg3_set_multi(tp, 1);
6279         } else if (dev->mc_count < 1) {
6280                 /* Reject all multicast. */
6281                 tg3_set_multi(tp, 0);
6282         } else {
6283                 /* Accept one or more multicast(s). */
6284                 struct dev_mc_list *mclist;
6285                 unsigned int i;
6286                 u32 mc_filter[4] = { 0, };
6287                 u32 regidx;
6288                 u32 bit;
6289                 u32 crc;
6290
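                /* The low 7 bits of the inverted CRC pick one of 128 hash
                 * filter bits: bits 6:5 select one of the four 32-bit
                 * MAC_HASH_REG registers, bits 4:0 select the bit within it,
                 * e.g. ~crc & 0x7f == 0x6b -> register 3, bit 11.
                 */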
6291                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6292                      i++, mclist = mclist->next) {
6293
6294                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6295                         bit = ~crc & 0x7f;
6296                         regidx = (bit & 0x60) >> 5;
6297                         bit &= 0x1f;
6298                         mc_filter[regidx] |= (1 << bit);
6299                 }
6300
6301                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6302                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6303                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6304                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6305         }
6306
6307         if (rx_mode != tp->rx_mode) {
6308                 tp->rx_mode = rx_mode;
6309                 tw32_f(MAC_RX_MODE, rx_mode);
6310                 udelay(10);
6311         }
6312 }
6313
6314 static void tg3_set_rx_mode(struct net_device *dev)
6315 {
6316         struct tg3 *tp = netdev_priv(dev);
6317
6318         spin_lock_irq(&tp->lock);
6319         spin_lock(&tp->tx_lock);
6320         __tg3_set_rx_mode(dev);
6321         spin_unlock(&tp->tx_lock);
6322         spin_unlock_irq(&tp->lock);
6323 }
6324
6325 #define TG3_REGDUMP_LEN         (32 * 1024)
6326
6327 static int tg3_get_regs_len(struct net_device *dev)
6328 {
6329         return TG3_REGDUMP_LEN;
6330 }
6331
6332 static void tg3_get_regs(struct net_device *dev,
6333                 struct ethtool_regs *regs, void *_p)
6334 {
6335         u32 *p = _p;
6336         struct tg3 *tp = netdev_priv(dev);
6337         u8 *orig_p = _p;
6338         int i;
6339
6340         regs->version = 0;
6341
6342         memset(p, 0, TG3_REGDUMP_LEN);
6343
6344         spin_lock_irq(&tp->lock);
6345         spin_lock(&tp->tx_lock);
6346
6347 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6348 #define GET_REG32_LOOP(base,len)                \
6349 do {    p = (u32 *)(orig_p + (base));           \
6350         for (i = 0; i < len; i += 4)            \
6351                 __GET_REG32((base) + i);        \
6352 } while (0)
6353 #define GET_REG32_1(reg)                        \
6354 do {    p = (u32 *)(orig_p + (reg));            \
6355         __GET_REG32((reg));                     \
6356 } while (0)
6357
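        /* Snapshot each register block at its native offset: GET_REG32_LOOP
         * copies 'len' bytes of registers starting at 'base' into the dump
         * buffer at that same offset, so the ethtool blob mirrors the
         * chip's register map (unread holes stay zero from the memset).
         */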
6358         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6359         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6360         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6361         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6362         GET_REG32_1(SNDDATAC_MODE);
6363         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6364         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6365         GET_REG32_1(SNDBDC_MODE);
6366         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6367         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6368         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6369         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6370         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6371         GET_REG32_1(RCVDCC_MODE);
6372         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6373         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6374         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6375         GET_REG32_1(MBFREE_MODE);
6376         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6377         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6378         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6379         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6380         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6381         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6382         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6383         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6384         GET_REG32_LOOP(FTQ_RESET, 0x120);
6385         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6386         GET_REG32_1(DMAC_MODE);
6387         GET_REG32_LOOP(GRC_MODE, 0x4c);
6388         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6389                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6390
6391 #undef __GET_REG32
6392 #undef GET_REG32_LOOP
6393 #undef GET_REG32_1
6394
6395         spin_unlock(&tp->tx_lock);
6396         spin_unlock_irq(&tp->lock);
6397 }
6398
6399 static int tg3_get_eeprom_len(struct net_device *dev)
6400 {
6401         struct tg3 *tp = netdev_priv(dev);
6402
6403         return tp->nvram_size;
6404 }
6405
6406 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6407
6408 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6409 {
6410         struct tg3 *tp = netdev_priv(dev);
6411         int ret;
6412         u8  *pd;
6413         u32 i, offset, len, val, b_offset, b_count;
6414
6415         offset = eeprom->offset;
6416         len = eeprom->len;
6417         eeprom->len = 0;
6418
6419         eeprom->magic = TG3_EEPROM_MAGIC;
6420
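        /* NVRAM is only readable as whole, 4-byte aligned words, so the
         * request is split into an unaligned head, aligned middle words and
         * an unaligned tail; partial words are read in full and only the
         * bytes the caller asked for are copied out.
         */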
6421         if (offset & 3) {
6422                 /* adjustments to start on required 4 byte boundary */
6423                 b_offset = offset & 3;
6424                 b_count = 4 - b_offset;
6425                 if (b_count > len) {
6426                         /* i.e. offset=1 len=2 */
6427                         b_count = len;
6428                 }
6429                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6430                 if (ret)
6431                         return ret;
6432                 val = cpu_to_le32(val);
6433                 memcpy(data, ((char*)&val) + b_offset, b_count);
6434                 len -= b_count;
6435                 offset += b_count;
6436                 eeprom->len += b_count;
6437         }
6438
6439         /* read bytes up to the last 4 byte boundary */
6440         pd = &data[eeprom->len];
6441         for (i = 0; i < (len - (len & 3)); i += 4) {
6442                 ret = tg3_nvram_read(tp, offset + i, &val);
6443                 if (ret) {
6444                         eeprom->len += i;
6445                         return ret;
6446                 }
6447                 val = cpu_to_le32(val);
6448                 memcpy(pd + i, &val, 4);
6449         }
6450         eeprom->len += i;
6451
6452         if (len & 3) {
6453                 /* read last bytes not ending on 4 byte boundary */
6454                 pd = &data[eeprom->len];
6455                 b_count = len & 3;
6456                 b_offset = offset + len - b_count;
6457                 ret = tg3_nvram_read(tp, b_offset, &val);
6458                 if (ret)
6459                         return ret;
6460                 val = cpu_to_le32(val);
6461                 memcpy(pd, ((char*)&val), b_count);
6462                 eeprom->len += b_count;
6463         }
6464         return 0;
6465 }
6466
6467 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6468
6469 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6470 {
6471         struct tg3 *tp = netdev_priv(dev);
6472         int ret;
6473         u32 offset, len, b_offset, odd_len, start, end;
6474         u8 *buf;
6475
6476         if (eeprom->magic != TG3_EEPROM_MAGIC)
6477                 return -EINVAL;
6478
6479         offset = eeprom->offset;
6480         len = eeprom->len;
6481
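        /* Writes must likewise be whole aligned words: any unaligned head
         * or tail word is read back first and merged with the caller's data
         * in a temporary buffer, so tg3_nvram_write_block() only ever sees
         * a dword aligned, dword sized request.
         */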
6482         if ((b_offset = (offset & 3))) {
6483                 /* adjustments to start on required 4 byte boundary */
6484                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6485                 if (ret)
6486                         return ret;
6487                 start = cpu_to_le32(start);
6488                 len += b_offset;
6489                 offset &= ~3;
6490         }
6491
6492         odd_len = 0;
6493         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6494                 /* adjustments to end on required 4 byte boundary */
6495                 odd_len = 1;
6496                 len = (len + 3) & ~3;
6497                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6498                 if (ret)
6499                         return ret;
6500                 end = cpu_to_le32(end);
6501         }
6502
6503         buf = data;
6504         if (b_offset || odd_len) {
6505                 buf = kmalloc(len, GFP_KERNEL);
6506                 if (!buf)
6507                         return -ENOMEM;
6508                 if (b_offset)
6509                         memcpy(buf, &start, 4);
6510                 if (odd_len)
6511                         memcpy(buf+len-4, &end, 4);
6512                 memcpy(buf + b_offset, data, eeprom->len);
6513         }
6514
6515         ret = tg3_nvram_write_block(tp, offset, len, buf);
6516
6517         if (buf != data)
6518                 kfree(buf);
6519
6520         return ret;
6521 }
6522
6523 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6524 {
6525         struct tg3 *tp = netdev_priv(dev);
6526   
6527         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6528                                         tp->link_config.phy_is_low_power)
6529                 return -EAGAIN;
6530
6531         cmd->supported = (SUPPORTED_Autoneg);
6532
6533         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6534                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6535                                    SUPPORTED_1000baseT_Full);
6536
6537         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6538                 cmd->supported |= (SUPPORTED_100baseT_Half |
6539                                   SUPPORTED_100baseT_Full |
6540                                   SUPPORTED_10baseT_Half |
6541                                   SUPPORTED_10baseT_Full |
6542                                   SUPPORTED_MII);
6543         else
6544                 cmd->supported |= SUPPORTED_FIBRE;
6545   
6546         cmd->advertising = tp->link_config.advertising;
6547         cmd->speed = tp->link_config.active_speed;
6548         cmd->duplex = tp->link_config.active_duplex;
6549         cmd->port = 0;
6550         cmd->phy_address = PHY_ADDR;
6551         cmd->transceiver = 0;
6552         cmd->autoneg = tp->link_config.autoneg;
6553         cmd->maxtxpkt = 0;
6554         cmd->maxrxpkt = 0;
6555         return 0;
6556 }
6557   
6558 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6559 {
6560         struct tg3 *tp = netdev_priv(dev);
6561   
6562         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6563             tp->link_config.phy_is_low_power)
6564                 return -EAGAIN;
6565
6566         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6567                 /* These are the only valid advertisement bits allowed.  */
6568                 if (cmd->autoneg == AUTONEG_ENABLE &&
6569                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6570                                           ADVERTISED_1000baseT_Full |
6571                                           ADVERTISED_Autoneg |
6572                                           ADVERTISED_FIBRE)))
6573                         return -EINVAL;
6574         }
6575
6576         spin_lock_irq(&tp->lock);
6577         spin_lock(&tp->tx_lock);
6578
6579         tp->link_config.autoneg = cmd->autoneg;
6580         if (cmd->autoneg == AUTONEG_ENABLE) {
6581                 tp->link_config.advertising = cmd->advertising;
6582                 tp->link_config.speed = SPEED_INVALID;
6583                 tp->link_config.duplex = DUPLEX_INVALID;
6584         } else {
6585                 tp->link_config.advertising = 0;
6586                 tp->link_config.speed = cmd->speed;
6587                 tp->link_config.duplex = cmd->duplex;
6588         }
6589   
6590         tg3_setup_phy(tp, 1);
6591         spin_unlock(&tp->tx_lock);
6592         spin_unlock_irq(&tp->lock);
6593   
6594         return 0;
6595 }
6596   
6597 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6598 {
6599         struct tg3 *tp = netdev_priv(dev);
6600   
6601         strcpy(info->driver, DRV_MODULE_NAME);
6602         strcpy(info->version, DRV_MODULE_VERSION);
6603         strcpy(info->bus_info, pci_name(tp->pdev));
6604 }
6605   
6606 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6607 {
6608         struct tg3 *tp = netdev_priv(dev);
6609   
6610         wol->supported = WAKE_MAGIC;
6611         wol->wolopts = 0;
6612         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6613                 wol->wolopts = WAKE_MAGIC;
6614         memset(&wol->sopass, 0, sizeof(wol->sopass));
6615 }
6616   
6617 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6618 {
6619         struct tg3 *tp = netdev_priv(dev);
6620   
6621         if (wol->wolopts & ~WAKE_MAGIC)
6622                 return -EINVAL;
6623         if ((wol->wolopts & WAKE_MAGIC) &&
6624             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6625             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6626                 return -EINVAL;
6627   
6628         spin_lock_irq(&tp->lock);
6629         if (wol->wolopts & WAKE_MAGIC)
6630                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6631         else
6632                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6633         spin_unlock_irq(&tp->lock);
6634   
6635         return 0;
6636 }
6637   
6638 static u32 tg3_get_msglevel(struct net_device *dev)
6639 {
6640         struct tg3 *tp = netdev_priv(dev);
6641         return tp->msg_enable;
6642 }
6643   
6644 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6645 {
6646         struct tg3 *tp = netdev_priv(dev);
6647         tp->msg_enable = value;
6648 }
6649   
6650 #if TG3_TSO_SUPPORT != 0
6651 static int tg3_set_tso(struct net_device *dev, u32 value)
6652 {
6653         struct tg3 *tp = netdev_priv(dev);
6654
6655         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6656                 if (value)
6657                         return -EINVAL;
6658                 return 0;
6659         }
6660         return ethtool_op_set_tso(dev, value);
6661 }
6662 #endif
6663   
6664 static int tg3_nway_reset(struct net_device *dev)
6665 {
6666         struct tg3 *tp = netdev_priv(dev);
6667         u32 bmcr;
6668         int r;
6669   
6670         spin_lock_irq(&tp->lock);
6671         r = -EINVAL;
6672         tg3_readphy(tp, MII_BMCR, &bmcr);
6673         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6674             (bmcr & BMCR_ANENABLE)) {
6675                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6676                 r = 0;
6677         }
6678         spin_unlock_irq(&tp->lock);
6679   
6680         return r;
6681 }
6682   
6683 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6684 {
6685         struct tg3 *tp = netdev_priv(dev);
6686   
6687         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6688         ering->rx_mini_max_pending = 0;
6689         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6690
6691         ering->rx_pending = tp->rx_pending;
6692         ering->rx_mini_pending = 0;
6693         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6694         ering->tx_pending = tp->tx_pending;
6695 }
6696   
6697 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6698 {
6699         struct tg3 *tp = netdev_priv(dev);
6700   
6701         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6702             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6703             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6704                 return -EINVAL;
6705   
6706         tg3_netif_stop(tp);
6707         spin_lock_irq(&tp->lock);
6708         spin_lock(&tp->tx_lock);
6709   
6710         tp->rx_pending = ering->rx_pending;
6711
6712         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6713             tp->rx_pending > 63)
6714                 tp->rx_pending = 63;
6715         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6716         tp->tx_pending = ering->tx_pending;
6717
6718         tg3_halt(tp);
6719         tg3_init_hw(tp);
6720         tg3_netif_start(tp);
6721         spin_unlock(&tp->tx_lock);
6722         spin_unlock_irq(&tp->lock);
6723   
6724         return 0;
6725 }
6726   
6727 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6728 {
6729         struct tg3 *tp = netdev_priv(dev);
6730   
6731         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6732         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6733         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6734 }
6735   
6736 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6737 {
6738         struct tg3 *tp = netdev_priv(dev);
6739   
6740         tg3_netif_stop(tp);
6741         spin_lock_irq(&tp->lock);
6742         spin_lock(&tp->tx_lock);
6743         if (epause->autoneg)
6744                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6745         else
6746                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6747         if (epause->rx_pause)
6748                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6749         else
6750                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6751         if (epause->tx_pause)
6752                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6753         else
6754                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6755         tg3_halt(tp);
6756         tg3_init_hw(tp);
6757         tg3_netif_start(tp);
6758         spin_unlock(&tp->tx_lock);
6759         spin_unlock_irq(&tp->lock);
6760   
6761         return 0;
6762 }
6763   
6764 static u32 tg3_get_rx_csum(struct net_device *dev)
6765 {
6766         struct tg3 *tp = netdev_priv(dev);
6767         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6768 }
6769   
6770 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6771 {
6772         struct tg3 *tp = netdev_priv(dev);
6773   
6774         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6775                 if (data != 0)
6776                         return -EINVAL;
6777                 return 0;
6778         }
6779   
6780         spin_lock_irq(&tp->lock);
6781         if (data)
6782                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6783         else
6784                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6785         spin_unlock_irq(&tp->lock);
6786   
6787         return 0;
6788 }
6789   
6790 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6791 {
6792         struct tg3 *tp = netdev_priv(dev);
6793   
6794         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6795                 if (data != 0)
6796                         return -EINVAL;
6797                 return 0;
6798         }
6799   
6800         if (data)
6801                 dev->features |= NETIF_F_IP_CSUM;
6802         else
6803                 dev->features &= ~NETIF_F_IP_CSUM;
6804
6805         return 0;
6806 }
6807
6808 static int tg3_get_stats_count (struct net_device *dev)
6809 {
6810         return TG3_NUM_STATS;
6811 }
6812
6813 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6814 {
6815         switch (stringset) {
6816         case ETH_SS_STATS:
6817                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6818                 break;
6819         default:
6820                 WARN_ON(1);     /* we need a WARN() */
6821                 break;
6822         }
6823 }
6824
6825 static void tg3_get_ethtool_stats (struct net_device *dev,
6826                                    struct ethtool_stats *estats, u64 *tmp_stats)
6827 {
6828         struct tg3 *tp = netdev_priv(dev);
6829         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6830 }
6831
6832 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6833 {
6834         struct mii_ioctl_data *data = if_mii(ifr);
6835         struct tg3 *tp = netdev_priv(dev);
6836         int err;
6837
6838         switch(cmd) {
6839         case SIOCGMIIPHY:
6840                 data->phy_id = PHY_ADDR;
6841
6842                 /* fallthru */
6843         case SIOCGMIIREG: {
6844                 u32 mii_regval;
6845
6846                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6847                         break;                  /* We have no PHY */
6848
6849                 spin_lock_irq(&tp->lock);
6850                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6851                 spin_unlock_irq(&tp->lock);
6852
6853                 data->val_out = mii_regval;
6854
6855                 return err;
6856         }
6857
6858         case SIOCSMIIREG:
6859                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6860                         break;                  /* We have no PHY */
6861
6862                 if (!capable(CAP_NET_ADMIN))
6863                         return -EPERM;
6864
6865                 spin_lock_irq(&tp->lock);
6866                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6867                 spin_unlock_irq(&tp->lock);
6868
6869                 return err;
6870
6871         default:
6872                 /* do nothing */
6873                 break;
6874         }
6875         return -EOPNOTSUPP;
6876 }
6877
6878 #if TG3_VLAN_TAG_USED
6879 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6880 {
6881         struct tg3 *tp = netdev_priv(dev);
6882
6883         spin_lock_irq(&tp->lock);
6884         spin_lock(&tp->tx_lock);
6885
6886         tp->vlgrp = grp;
6887
6888         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6889         __tg3_set_rx_mode(dev);
6890
6891         spin_unlock(&tp->tx_lock);
6892         spin_unlock_irq(&tp->lock);
6893 }
6894
6895 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6896 {
6897         struct tg3 *tp = netdev_priv(dev);
6898
6899         spin_lock_irq(&tp->lock);
6900         spin_lock(&tp->tx_lock);
6901         if (tp->vlgrp)
6902                 tp->vlgrp->vlan_devices[vid] = NULL;
6903         spin_unlock(&tp->tx_lock);
6904         spin_unlock_irq(&tp->lock);
6905 }
6906 #endif
6907
6908 static struct ethtool_ops tg3_ethtool_ops = {
6909         .get_settings           = tg3_get_settings,
6910         .set_settings           = tg3_set_settings,
6911         .get_drvinfo            = tg3_get_drvinfo,
6912         .get_regs_len           = tg3_get_regs_len,
6913         .get_regs               = tg3_get_regs,
6914         .get_wol                = tg3_get_wol,
6915         .set_wol                = tg3_set_wol,
6916         .get_msglevel           = tg3_get_msglevel,
6917         .set_msglevel           = tg3_set_msglevel,
6918         .nway_reset             = tg3_nway_reset,
6919         .get_link               = ethtool_op_get_link,
6920         .get_eeprom_len         = tg3_get_eeprom_len,
6921         .get_eeprom             = tg3_get_eeprom,
6922         .set_eeprom             = tg3_set_eeprom,
6923         .get_ringparam          = tg3_get_ringparam,
6924         .set_ringparam          = tg3_set_ringparam,
6925         .get_pauseparam         = tg3_get_pauseparam,
6926         .set_pauseparam         = tg3_set_pauseparam,
6927         .get_rx_csum            = tg3_get_rx_csum,
6928         .set_rx_csum            = tg3_set_rx_csum,
6929         .get_tx_csum            = ethtool_op_get_tx_csum,
6930         .set_tx_csum            = tg3_set_tx_csum,
6931         .get_sg                 = ethtool_op_get_sg,
6932         .set_sg                 = ethtool_op_set_sg,
6933 #if TG3_TSO_SUPPORT != 0
6934         .get_tso                = ethtool_op_get_tso,
6935         .set_tso                = tg3_set_tso,
6936 #endif
6937         .get_strings            = tg3_get_strings,
6938         .get_stats_count        = tg3_get_stats_count,
6939         .get_ethtool_stats      = tg3_get_ethtool_stats,
6940 };
6941
6942 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6943 {
6944         u32 cursize, val;
6945
6946         tp->nvram_size = EEPROM_CHIP_SIZE;
6947
6948         if (tg3_nvram_read(tp, 0, &val) != 0)
6949                 return;
6950
6951         if (swab32(val) != TG3_EEPROM_MAGIC)
6952                 return;
6953
6954         /*
6955          * Size the chip by reading offsets at increasing powers of two.
6956          * When we encounter our validation signature, we know the addressing
6957          * has wrapped around, and thus have our chip size.
6958          */
6959         cursize = 0x800;
6960
6961         while (cursize < tp->nvram_size) {
6962                 if (tg3_nvram_read(tp, cursize, &val) != 0)
6963                         return;
6964
6965                 if (swab32(val) == TG3_EEPROM_MAGIC)
6966                         break;
6967
6968                 cursize <<= 1;
6969         }
6970
6971         tp->nvram_size = cursize;
6972 }
6973                 
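/* The word at NVRAM offset 0xf0, when present, is assumed to encode the
 * part's capacity in kilobytes in its upper 16 bits; fall back to 128KB
 * (0x20000) when it is zero or unreadable.
 */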
6974 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
6975 {
6976         u32 val;
6977
6978         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
6979                 if (val != 0) {
6980                         tp->nvram_size = (val >> 16) * 1024;
6981                         return;
6982                 }
6983         }
6984         tp->nvram_size = 0x20000;
6985 }
6986
6987 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
6988 {
6989         u32 nvcfg1;
6990
6991         nvcfg1 = tr32(NVRAM_CFG1);
6992         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6993                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
6994         }
6995         else {
6996                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6997                 tw32(NVRAM_CFG1, nvcfg1);
6998         }
6999
7000         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7001                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7002                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7003                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7004                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7005                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7006                                 break;
7007                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7008                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7009                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7010                                 break;
7011                         case FLASH_VENDOR_ATMEL_EEPROM:
7012                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7013                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7014                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7015                                 break;
7016                         case FLASH_VENDOR_ST:
7017                                 tp->nvram_jedecnum = JEDEC_ST;
7018                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7019                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7020                                 break;
7021                         case FLASH_VENDOR_SAIFUN:
7022                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7023                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7024                                 break;
7025                         case FLASH_VENDOR_SST_SMALL:
7026                         case FLASH_VENDOR_SST_LARGE:
7027                                 tp->nvram_jedecnum = JEDEC_SST;
7028                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7029                                 break;
7030                 }
7031         }
7032         else {
7033                 tp->nvram_jedecnum = JEDEC_ATMEL;
7034                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7035                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7036         }
7037 }
7038
7039 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7040 static void __devinit tg3_nvram_init(struct tg3 *tp)
7041 {
7042         int j;
7043
7044         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7045                 return;
7046
7047         tw32_f(GRC_EEPROM_ADDR,
7048              (EEPROM_ADDR_FSM_RESET |
7049               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7050                EEPROM_ADDR_CLKPERD_SHIFT)));
7051
7052         /* XXX schedule_timeout() ... */
7053         for (j = 0; j < 100; j++)
7054                 udelay(10);
7055
7056         /* Enable seeprom accesses. */
7057         tw32_f(GRC_LOCAL_CTRL,
7058              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7059         udelay(100);
7060
7061         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7062             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7063                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7064
7065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7066                         u32 nvaccess = tr32(NVRAM_ACCESS);
7067
7068                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7069                 }
7070
7071                 tg3_get_nvram_info(tp);
7072                 tg3_get_nvram_size(tp);
7073
7074                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7075                         u32 nvaccess = tr32(NVRAM_ACCESS);
7076
7077                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7078                 }
7079
7080         } else {
7081                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7082
7083                 tg3_get_eeprom_size(tp);
7084         }
7085 }
7086
7087 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7088                                         u32 offset, u32 *val)
7089 {
7090         u32 tmp;
7091         int i;
7092
7093         if (offset > EEPROM_ADDR_ADDR_MASK ||
7094             (offset % 4) != 0)
7095                 return -EINVAL;
7096
7097         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7098                                         EEPROM_ADDR_DEVID_MASK |
7099                                         EEPROM_ADDR_READ);
7100         tw32(GRC_EEPROM_ADDR,
7101              tmp |
7102              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7103              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7104               EEPROM_ADDR_ADDR_MASK) |
7105              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7106
7107         for (i = 0; i < 10000; i++) {
7108                 tmp = tr32(GRC_EEPROM_ADDR);
7109
7110                 if (tmp & EEPROM_ADDR_COMPLETE)
7111                         break;
7112                 udelay(100);
7113         }
7114         if (!(tmp & EEPROM_ADDR_COMPLETE))
7115                 return -EBUSY;
7116
7117         *val = tr32(GRC_EEPROM_DATA);
7118         return 0;
7119 }
7120
7121 #define NVRAM_CMD_TIMEOUT 10000
7122
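/* Kick off an NVRAM command and poll for NVRAM_CMD_DONE, giving the
 * controller up to NVRAM_CMD_TIMEOUT * 10us (about 100ms) to finish.
 */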
7123 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7124 {
7125         int i;
7126
7127         tw32(NVRAM_CMD, nvram_cmd);
7128         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7129                 udelay(10);
7130                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7131                         udelay(10);
7132                         break;
7133                 }
7134         }
7135         if (i == NVRAM_CMD_TIMEOUT) {
7136                 return -EBUSY;
7137         }
7138         return 0;
7139 }
7140
7141 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7142 {
7143         int ret;
7144
7145         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7146                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7147                 return -EINVAL;
7148         }
7149
7150         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7151                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7152
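        /* Buffered Atmel (AT45DB0X1B style) flash is not addressed
         * linearly: the page index lives above ATMEL_AT45DB0X1B_PAGE_POS
         * and the byte offset within the page sits in the low bits, so the
         * flat NVRAM offset is re-encoded before being written to
         * NVRAM_ADDR.
         */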
7153         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7154                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7155                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7156
7157                 offset = ((offset / tp->nvram_pagesize) <<
7158                           ATMEL_AT45DB0X1B_PAGE_POS) +
7159                         (offset % tp->nvram_pagesize);
7160         }
7161
7162         if (offset > NVRAM_ADDR_MSK)
7163                 return -EINVAL;
7164
7165         tg3_nvram_lock(tp);
7166
7167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7168                 u32 nvaccess = tr32(NVRAM_ACCESS);
7169
7170                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7171         }
7172
7173         tw32(NVRAM_ADDR, offset);
7174         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7175                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7176
7177         if (ret == 0)
7178                 *val = swab32(tr32(NVRAM_RDDATA));
7179
7180         tg3_nvram_unlock(tp);
7181
7182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7183                 u32 nvaccess = tr32(NVRAM_ACCESS);
7184
7185                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7186         }
7187
7188         return ret;
7189 }
7190
7191 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7192                                     u32 offset, u32 len, u8 *buf)
7193 {
7194         int i, j, rc = 0;
7195         u32 val;
7196
7197         for (i = 0; i < len; i += 4) {
7198                 u32 addr, data;
7199
7200                 addr = offset + i;
7201
7202                 memcpy(&data, buf + i, 4);
7203
7204                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7205
7206                 val = tr32(GRC_EEPROM_ADDR);
7207                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7208
7209                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7210                         EEPROM_ADDR_READ);
7211                 tw32(GRC_EEPROM_ADDR, val |
7212                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7213                         (addr & EEPROM_ADDR_ADDR_MASK) |
7214                         EEPROM_ADDR_START |
7215                         EEPROM_ADDR_WRITE);
7216                 
7217                 for (j = 0; j < 10000; j++) {
7218                         val = tr32(GRC_EEPROM_ADDR);
7219
7220                         if (val & EEPROM_ADDR_COMPLETE)
7221                                 break;
7222                         udelay(100);
7223                 }
7224                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7225                         rc = -EBUSY;
7226                         break;
7227                 }
7228         }
7229
7230         return rc;
7231 }
7232
7233 /* offset and length are dword aligned */
7234 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7235                 u8 *buf)
7236 {
7237         int ret = 0;
7238         u32 pagesize = tp->nvram_pagesize;
7239         u32 pagemask = pagesize - 1;
7240         u32 nvram_cmd;
7241         u8 *tmp;
7242
7243         tmp = kmalloc(pagesize, GFP_KERNEL);
7244         if (tmp == NULL)
7245                 return -ENOMEM;
7246
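        /* Unbuffered flash can only be programmed a full page at a time:
         * read the page covering 'offset' into a bounce buffer, merge in
         * the new bytes, erase the page, then re-program it one 32-bit
         * word at a time with FIRST/LAST framing the page.
         */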
7247         while (len) {
7248                 int j;
7249                 u32 phy_addr, page_off, size, nvaccess;
7250
7251                 phy_addr = offset & ~pagemask;
7252         
7253                 for (j = 0; j < pagesize; j += 4) {
7254                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7255                                                 (u32 *) (tmp + j))))
7256                                 break;
7257                 }
7258                 if (ret)
7259                         break;
7260
7261                 page_off = offset & pagemask;
7262                 size = pagesize;
7263                 if (len < size)
7264                         size = len;
7265
7266                 len -= size;
7267
7268                 memcpy(tmp + page_off, buf, size);
7269
7270                 offset = offset + (pagesize - page_off);
7271
7272                 nvaccess = tr32(NVRAM_ACCESS);
7273                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7274
7275                 /*
7276                  * Before we can erase the flash page, we need
7277                  * to issue a special "write enable" command.
7278                  */
7279                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7280
7281                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7282                         break;
7283
7284                 /* Erase the target page */
7285                 tw32(NVRAM_ADDR, phy_addr);
7286
7287                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7288                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7289
7290                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7291                         break;
7292
7293                 /* Issue another write enable to start the write. */
7294                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7295
7296                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7297                         break;
7298
7299                 for (j = 0; j < pagesize; j += 4) {
7300                         u32 data;
7301
7302                         data = *((u32 *) (tmp + j));
7303                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7304
7305                         tw32(NVRAM_ADDR, phy_addr + j);
7306
7307                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7308                                 NVRAM_CMD_WR;
7309
7310                         if (j == 0)
7311                                 nvram_cmd |= NVRAM_CMD_FIRST;
7312                         else if (j == (pagesize - 4))
7313                                 nvram_cmd |= NVRAM_CMD_LAST;
7314
7315                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7316                                 break;
7317                 }
7318                 if (ret)
7319                         break;
7320         }
7321
7322         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7323         tg3_nvram_exec_cmd(tp, nvram_cmd);
7324
7325         kfree(tmp);
7326
7327         return ret;
7328 }
7329
7330 /* offset and length are dword aligned */
7331 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7332                 u8 *buf)
7333 {
7334         int i, ret = 0;
7335
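        /* Buffered flash and EEPROM parts need no explicit erase cycle:
         * each 32-bit word is written directly, with NVRAM_CMD_FIRST/LAST
         * marking page boundaries (ST parts also get a write-enable at the
         * start of every page).
         */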
7336         for (i = 0; i < len; i += 4, offset += 4) {
7337                 u32 data, page_off, phy_addr, nvram_cmd;
7338
7339                 memcpy(&data, buf + i, 4);
7340                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7341
7342                 page_off = offset % tp->nvram_pagesize;
7343
7344                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7345                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7346
7347                         phy_addr = ((offset / tp->nvram_pagesize) <<
7348                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7349                 }
7350                 else {
7351                         phy_addr = offset;
7352                 }
7353
7354                 tw32(NVRAM_ADDR, phy_addr);
7355
7356                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7357
7358                 if ((page_off == 0) || (i == 0))
7359                         nvram_cmd |= NVRAM_CMD_FIRST;
7360                 else if (page_off == (tp->nvram_pagesize - 4))
7361                         nvram_cmd |= NVRAM_CMD_LAST;
7362
7363                 if (i == (len - 4))
7364                         nvram_cmd |= NVRAM_CMD_LAST;
7365
7366                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7367                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7368
7369                         if ((ret = tg3_nvram_exec_cmd(tp,
7370                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7371                                 NVRAM_CMD_DONE)))
7372
7373                                 break;
7374                 }
7375                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7376                         /* We always do complete word writes to eeprom. */
7377                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7378                 }
7379
7380                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7381                         break;
7382         }
7383         return ret;
7384 }
7385
7386 /* offset and length are dword aligned */
7387 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7388 {
7389         int ret;
7390
7391         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7392                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7393                 return -EINVAL;
7394         }
7395
7396         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7397                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7398                        GRC_LCLCTRL_GPIO_OE1);
7399                 udelay(40);
7400         }
7401
7402         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7403                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7404         }
7405         else {
7406                 u32 grc_mode;
7407
7408                 tg3_nvram_lock(tp);
7409
7410                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7411                         u32 nvaccess = tr32(NVRAM_ACCESS);
7412
7413                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7414
7415                         tw32(NVRAM_WRITE1, 0x406);
7416                 }
7417
7418                 grc_mode = tr32(GRC_MODE);
7419                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7420
7421                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7422                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7423
7424                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7425                                 buf);
7426                 }
7427                 else {
7428                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7429                                 buf);
7430                 }
7431
7432                 grc_mode = tr32(GRC_MODE);
7433                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7434
7435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7436                         u32 nvaccess = tr32(NVRAM_ACCESS);
7437
7438                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7439                 }
7440                 tg3_nvram_unlock(tp);
7441         }
7442
7443         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7444                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7445                        GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
7446                 udelay(40);
7447         }
7448
7449         return ret;
7450 }
7451
7452 struct subsys_tbl_ent {
7453         u16 subsys_vendor, subsys_devid;
7454         u32 phy_id;
7455 };
7456
7457 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7458         /* Broadcom boards. */
7459         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7460         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7461         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7462         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7463         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7464         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7465         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7466         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7467         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7468         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7469         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7470
7471         /* 3com boards. */
7472         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7473         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7474         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7475         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7476         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7477
7478         /* DELL boards. */
7479         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7480         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7481         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7482         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7483
7484         /* Compaq boards. */
7485         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7486         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7487         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7488         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7489         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7490
7491         /* IBM boards. */
7492         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7493 };
7494
7495 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7496 {
7497         int i;
7498
7499         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7500                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7501                      tp->pdev->subsystem_vendor) &&
7502                     (subsys_id_to_phy_id[i].subsys_devid ==
7503                      tp->pdev->subsystem_device))
7504                         return &subsys_id_to_phy_id[i];
7505         }
7506         return NULL;
7507 }
7508
7509 static int __devinit tg3_phy_probe(struct tg3 *tp)
7510 {
7511         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7512         u32 hw_phy_id, hw_phy_id_masked;
7513         u32 val;
7514         int eeprom_signature_found, eeprom_phy_serdes, err;
7515
7516         tp->phy_id = PHY_ID_INVALID;
7517         eeprom_phy_id = PHY_ID_INVALID;
7518         eeprom_phy_serdes = 0;
7519         eeprom_signature_found = 0;
7520         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7521         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7522                 u32 nic_cfg, led_cfg;
7523                 u32 nic_phy_id, ver, cfg2 = 0;
7524
7525                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7526                 tp->nic_sram_data_cfg = nic_cfg;
7527
7528                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7529                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7530                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7531                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7532                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7533                     (ver > 0) && (ver < 0x100))
7534                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7535
7536                 eeprom_signature_found = 1;
7537
7538                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7539                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7540                         eeprom_phy_serdes = 1;
7541
7542                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7543                 if (nic_phy_id != 0) {
7544                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7545                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7546
7547                         eeprom_phy_id  = (id1 >> 16) << 10;
7548                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7549                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7550                 } else
7551                         eeprom_phy_id = 0;
7552
7553                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7554                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7555                                     SHASTA_EXT_LED_MODE_MASK);
7556                 } else
7557                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7558
7559                 switch (led_cfg) {
7560                 default:
7561                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7562                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7563                         break;
7564
7565                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7566                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7567                         break;
7568
7569                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7570                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7571                         break;
7572
7573                 case SHASTA_EXT_LED_SHARED:
7574                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7575                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7576                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7577                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7578                                                  LED_CTRL_MODE_PHY_2);
7579                         break;
7580
7581                 case SHASTA_EXT_LED_MAC:
7582                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7583                         break;
7584
7585                 case SHASTA_EXT_LED_COMBO:
7586                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7587                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7588                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7589                                                  LED_CTRL_MODE_PHY_2);
7590                         break;
7591
7592                 }
7593
7594                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7595                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7596                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7597                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7598
7599                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7600                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7601                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7602                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7603
7604                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7605                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7606                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7607                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7608                 }
7609                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7610                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7611
7612                 if (cfg2 & (1 << 17))
7613                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7614
7615                 /* serdes signal pre-emphasis in register 0x590 is set
7616                  * by the bootcode if bit 18 is set.  */
7617                 if (cfg2 & (1 << 18))
7618                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7619         }
7620
7621         /* Reading the PHY ID register can conflict with ASF
7622          * firmware access to the PHY hardware.
7623          */
7624         err = 0;
7625         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7626                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7627         } else {
7628                 /* Now read the physical PHY_ID from the chip and verify
7629                  * that it is sane.  If it doesn't look good, we fall back
7630                  * to the PHY ID found in the eeprom area, and failing
7631                  * that to the hard-coded subsystem ID table.
7632                  */
7633                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7634                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7635
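                /* Fold the two MII ID registers into the driver's internal
                 * PHY_ID encoding so it can be compared against the
                 * PHY_ID_* constants via PHY_ID_MASK.
                 */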
7636                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7637                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7638                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7639
7640                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7641         }
7642
7643         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7644                 tp->phy_id = hw_phy_id;
7645                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7646                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7647         } else {
7648                 if (eeprom_signature_found) {
7649                         tp->phy_id = eeprom_phy_id;
7650                         if (eeprom_phy_serdes)
7651                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7652                 } else {
7653                         struct subsys_tbl_ent *p;
7654
7655                         /* No eeprom signature?  Try the hardcoded
7656                          * subsys device table.
7657                          */
7658                         p = lookup_by_subsys(tp);
7659                         if (!p)
7660                                 return -ENODEV;
7661
7662                         tp->phy_id = p->phy_id;
7663                         if (!tp->phy_id ||
7664                             tp->phy_id == PHY_ID_BCM8002)
7665                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7666                 }
7667         }
7668
7669         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7670             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7671                 u32 bmsr, adv_reg, tg3_ctrl;
7672
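                /* The BMSR link bit is latched-low, so it is read twice;
                 * only the second read reflects the current link state.
                 */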
7673                 tg3_readphy(tp, MII_BMSR, &bmsr);
7674                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7675                     (bmsr & BMSR_LSTATUS))
7676                         goto skip_phy_reset;
7677                     
7678                 err = tg3_phy_reset(tp);
7679                 if (err)
7680                         return err;
7681
7682                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7683                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7684                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7685                 tg3_ctrl = 0;
7686                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7687                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7688                                     MII_TG3_CTRL_ADV_1000_FULL);
7689                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7690                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7691                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7692                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7693                 }
7694
7695                 if (!tg3_copper_is_advertising_all(tp)) {
7696                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7697
7698                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7699                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7700
7701                         tg3_writephy(tp, MII_BMCR,
7702                                      BMCR_ANENABLE | BMCR_ANRESTART);
7703                 }
7704                 tg3_phy_set_wirespeed(tp);
7705
7706                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7707                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7708                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7709         }
7710
7711 skip_phy_reset:
7712         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7713                 err = tg3_init_5401phy_dsp(tp);
7714                 if (err)
7715                         return err;
7716         }
7717
7718         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7719                 err = tg3_init_5401phy_dsp(tp);
7720         }
7721
7722         if (!eeprom_signature_found)
7723                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7724
7725         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7726                 tp->link_config.advertising =
7727                         (ADVERTISED_1000baseT_Half |
7728                          ADVERTISED_1000baseT_Full |
7729                          ADVERTISED_Autoneg |
7730                          ADVERTISED_FIBRE);
7731         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7732                 tp->link_config.advertising &=
7733                         ~(ADVERTISED_1000baseT_Half |
7734                           ADVERTISED_1000baseT_Full);
7735
7736         return err;
7737 }
7738
7739 static void __devinit tg3_read_partno(struct tg3 *tp)
7740 {
7741         unsigned char vpd_data[256];
7742         int i;
7743
7744         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7745                 /* Sun decided not to put the necessary bits in the
7746                  * NVRAM of their onboard tg3 parts :(
7747                  */
7748                 strcpy(tp->board_part_number, "Sun 570X");
7749                 return;
7750         }
7751
7752         for (i = 0; i < 256; i += 4) {
7753                 u32 tmp;
7754
7755                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7756                         goto out_not_found;
7757
7758                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7759                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7760                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7761                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7762         }
7763
7764         /* Now parse and find the part number. */
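        /* The VPD area is a sequence of large-resource tags: 0x82
         * (identifier string) and 0x91 (read/write area) are skipped using
         * their little-endian 16-bit length, while 0x90 (the read-only
         * area) is searched for the "PN" keyword holding the part number.
         */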
7765         for (i = 0; i < 256; ) {
7766                 unsigned char val = vpd_data[i];
7767                 int block_end;
7768
7769                 if (val == 0x82 || val == 0x91) {
7770                         i = (i + 3 +
7771                              (vpd_data[i + 1] +
7772                               (vpd_data[i + 2] << 8)));
7773                         continue;
7774                 }
7775
7776                 if (val != 0x90)
7777                         goto out_not_found;
7778
7779                 block_end = (i + 3 +
7780                              (vpd_data[i + 1] +
7781                               (vpd_data[i + 2] << 8)));
7782                 i += 3;
7783                 while (i < block_end) {
7784                         if (vpd_data[i + 0] == 'P' &&
7785                             vpd_data[i + 1] == 'N') {
7786                                 int partno_len = vpd_data[i + 2];
7787
7788                                 if (partno_len > 24)
7789                                         goto out_not_found;
7790
7791                                 memcpy(tp->board_part_number,
7792                                        &vpd_data[i + 3],
7793                                        partno_len);
7794
7795                                 /* Success. */
7796                                 return;
7797                         }
                        /* Not the part-number keyword; skip over this
                         * keyword's 3-byte header and its data bytes.
                         */
                        i += 3 + vpd_data[i + 2];
7798                 }
7799
7800                 /* Part number not found. */
7801                 goto out_not_found;
7802         }
7803
7804 out_not_found:
7805         strcpy(tp->board_part_number, "none");
7806 }
7807
7808 #ifdef CONFIG_SPARC64
7809 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7810 {
7811         struct pci_dev *pdev = tp->pdev;
7812         struct pcidev_cookie *pcp = pdev->sysdata;
7813
7814         if (pcp != NULL) {
7815                 int node = pcp->prom_node;
7816                 u32 venid;
7817                 int err;
7818
7819                 err = prom_getproperty(node, "subsystem-vendor-id",
7820                                        (char *) &venid, sizeof(venid));
7821                 if (err == 0 || err == -1)
7822                         return 0;
7823                 if (venid == PCI_VENDOR_ID_SUN)
7824                         return 1;
7825         }
7826         return 0;
7827 }
7828 #endif
7829
7830 static int __devinit tg3_get_invariants(struct tg3 *tp)
7831 {
7832         u32 misc_ctrl_reg;
7833         u32 cacheline_sz_reg;
7834         u32 pci_state_reg, grc_misc_cfg;
7835         u32 val;
7836         u16 pci_cmd;
7837         int err;
7838
7839 #ifdef CONFIG_SPARC64
7840         if (tg3_is_sun_570X(tp))
7841                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7842 #endif
7843
7844         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7845          * reordering to the mailbox registers done by the host
7846          * controller can cause major troubles.  We read back from
7847          * every mailbox register write to force the writes to be
7848          * posted to the chip in order.
7849          */
7850         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7851                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7852             pci_find_device(PCI_VENDOR_ID_INTEL,
7853                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7854             pci_find_device(PCI_VENDOR_ID_INTEL,
7855                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7856             pci_find_device(PCI_VENDOR_ID_INTEL,
7857                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7858             pci_find_device(PCI_VENDOR_ID_AMD,
7859                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7860                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7861
7862         /* Force memory write invalidate off.  If we leave it on,
7863          * then on 5700_BX chips we have to enable a workaround.
7864          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7865          * to match the cacheline size.  The Broadcom driver has this
7866          * workaround but turns MWI off all the time and so never uses
7867          * it.  This seems to suggest that the workaround is insufficient.
7868          */
7869         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7870         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7871         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7872
7873         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7874          * has the register indirect write enable bit set before
7875          * we try to access any of the MMIO registers.  It is also
7876          * critical that the PCI-X hw workaround situation is decided
7877          * before that as well.
7878          */
7879         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7880                               &misc_ctrl_reg);
7881
7882         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7883                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7884
7885         /* Initialize misc host control in PCI block. */
7886         tp->misc_host_ctrl |= (misc_ctrl_reg &
7887                                MISC_HOST_CTRL_CHIPREV);
7888         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7889                                tp->misc_host_ctrl);
7890
7891         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7892                               &cacheline_sz_reg);
7893
7894         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7895         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7896         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7897         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7898
7899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7900                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7901
7902         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7903                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7904
7905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7906             tp->pci_lat_timer < 64) {
7907                 tp->pci_lat_timer = 64;
7908
7909                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7910                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7911                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7912                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7913
7914                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7915                                        cacheline_sz_reg);
7916         }
7917
7918         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7919                               &pci_state_reg);
7920
7921         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7922                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7923
7924                 /* If this is a 5700 BX chipset, and we are in PCI-X
7925                  * mode, enable register write workaround.
7926                  *
7927                  * The workaround is to use indirect register accesses
7928                  * for all chip writes not to mailbox registers.
7929                  */
7930                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7931                         u32 pm_reg;
7932                         u16 pci_cmd;
7933
7934                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7935
7936                         /* The chip can have its power management PCI config
7937                          * space registers clobbered due to this bug.
7938                          * So explicitly force the chip into D0 here.
7939                          */
7940                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7941                                               &pm_reg);
7942                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7943                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7944                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7945                                                pm_reg);
7946
7947                         /* Also, force SERR#/PERR# in PCI command. */
7948                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7949                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7950                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7951                 }
7952         }
7953
7954         /* Back-to-back register writes can cause problems on this chip;
7955          * the workaround is to read back all reg writes except those to
7956          * mailbox regs.  See tg3_write_indirect_reg32().
7957          *
7958          * PCI Express 5750_A0 rev chips need this workaround too.
7959          */
7960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7961             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7962              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7963                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7964
7965         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7966                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7967         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7968                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7969
7970         /* Chip-specific fixup from Broadcom driver */
7971         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7972             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7973                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7974                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7975         }
7976
7977         /* Force the chip into D0. */
7978         err = tg3_set_power_state(tp, 0);
7979         if (err) {
7980                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7981                        pci_name(tp->pdev));
7982                 return err;
7983         }
7984
7985         /* 5700 B0 chips do not support checksumming correctly due
7986          * to hardware bugs.
7987          */
7988         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7989                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7990
7991         /* Pseudo-header checksum is done by hardware logic and not
7992          * the offload processors, so make the chip do the pseudo-
7993          * header checksums on receive.  For transmit it is more
7994          * convenient to do the pseudo-header checksum in software
7995          * as Linux does that on transmit for us in all cases.
7996          */
7997         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7998         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7999
8000         /* Derive initial jumbo mode from MTU assigned in
8001          * ether_setup() via the alloc_etherdev() call
8002          */
8003         if (tp->dev->mtu > ETH_DATA_LEN)
8004                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8005
8006         /* Determine WakeOnLan speed to use. */
8007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8008             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8009             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8010             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8011                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8012         } else {
8013                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8014         }
8015
8016         /* A few boards don't want the Ethernet@WireSpeed phy feature */
8017         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8018             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8019              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8020              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8021                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8022
8023         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8024             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8025                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8026         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8027                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8028
8029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8031                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8032
8033         /* Only 5701 and later support tagged irq status mode.
8034          * Also, 5788 chips cannot use tagged irq status.
8035          *
8036          * However, since we are using NAPI, avoid tagged irq status
8037          * because the interrupt condition is more difficult to
8038          * fully clear in that mode.
8039          */
8040         tp->coalesce_mode = 0;
8041
8042         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8043             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8044                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8045
8046         /* Initialize MAC MI mode, polling disabled. */
8047         tw32_f(MAC_MI_MODE, tp->mi_mode);
8048         udelay(80);
8049
8050         /* Initialize data/descriptor byte/word swapping. */
8051         val = tr32(GRC_MODE);
8052         val &= GRC_MODE_HOST_STACKUP;
8053         tw32(GRC_MODE, val | tp->grc_mode);
8054
8055         tg3_switch_clocks(tp);
8056
8057         /* Clear this out for sanity. */
8058         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8059
8060         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8061                               &pci_state_reg);
8062         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8063             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8064                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8065
8066                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8067                     chiprevid == CHIPREV_ID_5701_B0 ||
8068                     chiprevid == CHIPREV_ID_5701_B2 ||
8069                     chiprevid == CHIPREV_ID_5701_B5) {
8070                         void __iomem *sram_base;
8071
8072                         /* Write some dummy words into the SRAM status block
8073                          * area and see if it reads back correctly.  If the read-back
8074                          * value is bad, force-enable the PCIX workaround.
8075                          */
8076                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8077
8078                         writel(0x00000000, sram_base);
8079                         writel(0x00000000, sram_base + 4);
8080                         writel(0xffffffff, sram_base + 4);
8081                         if (readl(sram_base) != 0x00000000)
8082                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8083                 }
8084         }
8085
8086         udelay(50);
8087         tg3_nvram_init(tp);
8088
8089         grc_misc_cfg = tr32(GRC_MISC_CFG);
8090         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8091
8092         /* Broadcom's driver says that CIOBE multisplit has a bug */
8093 #if 0
8094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8095             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8096                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8097                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8098         }
8099 #endif
8100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8101             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8102              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8103                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8104
8105         /* these are limited to 10/100 only */
8106         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8107              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8108             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8109              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8110              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8111               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8112               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8113             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8114              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8115               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8116                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8117
8118         err = tg3_phy_probe(tp);
8119         if (err) {
8120                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8121                        pci_name(tp->pdev), err);
8122                 /* ... but do not return immediately ... */
8123         }
8124
8125         tg3_read_partno(tp);
8126
8127         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8128                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8129         } else {
8130                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8131                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8132                 else
8133                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8134         }
8135
8136         /* 5700 {AX,BX} chips have a broken status block link
8137          * change bit implementation, so we must use the
8138          * status register in those cases.
8139          */
8140         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8141                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8142         else
8143                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8144
8145         /* The led_ctrl is set during tg3_phy_probe; here we might
8146          * have to force the link status polling mechanism based
8147          * upon subsystem IDs.
8148          */
8149         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8150             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8151                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8152                                   TG3_FLAG_USE_LINKCHG_REG);
8153         }
8154
8155         /* For all SERDES we poll the MAC status register. */
8156         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8157                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8158         else
8159                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8160
8161         /* 5700 BX chips need to have their TX producer index mailboxes
8162          * written twice to work around a bug.
8163          */
8164         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8165                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8166         else
8167                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8168
8169         /* It seems all chips can get confused if TX buffers
8170          * straddle the 4GB address boundary in some cases.
8171          */
8172         tp->dev->hard_start_xmit = tg3_start_xmit;
8173
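        /* Reserve 2 bytes in front of each RX buffer so the IP header ends
         * up 4-byte aligned.  5701 chips in PCI-X mode are the exception,
         * apparently because they cannot DMA to 2-byte-aligned addresses.
         */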
8174         tp->rx_offset = 2;
8175         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8176             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8177                 tp->rx_offset = 0;
8178
8179         /* By default, disable wake-on-lan.  User can change this
8180          * using ETHTOOL_SWOL.
8181          */
8182         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8183
8184         return err;
8185 }
8186
8187 #ifdef CONFIG_SPARC64
8188 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8189 {
8190         struct net_device *dev = tp->dev;
8191         struct pci_dev *pdev = tp->pdev;
8192         struct pcidev_cookie *pcp = pdev->sysdata;
8193
8194         if (pcp != NULL) {
8195                 int node = pcp->prom_node;
8196
8197                 if (prom_getproplen(node, "local-mac-address") == 6) {
8198                         prom_getproperty(node, "local-mac-address",
8199                                          dev->dev_addr, 6);
8200                         return 0;
8201                 }
8202         }
8203         return -ENODEV;
8204 }
8205
8206 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8207 {
8208         struct net_device *dev = tp->dev;
8209
8210         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8211         return 0;
8212 }
8213 #endif
8214
8215 static int __devinit tg3_get_device_address(struct tg3 *tp)
8216 {
8217         struct net_device *dev = tp->dev;
8218         u32 hi, lo, mac_offset;
8219
8220 #ifdef CONFIG_SPARC64
8221         if (!tg3_get_macaddr_sparc(tp))
8222                 return 0;
8223 #endif
8224
8225         mac_offset = 0x7c;
8226         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8227             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8228                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8229                         mac_offset = 0xcc;
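                /* If the NVRAM arbitration lock cannot be obtained, reset
                 * the NVRAM state machine; otherwise just drop the lock
                 * we acquired.
                 */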
8230                 if (tg3_nvram_lock(tp))
8231                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8232                 else
8233                         tg3_nvram_unlock(tp);
8234         }
8235
8236         /* First try to get it from MAC address mailbox. */
8237         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
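        /* The upper 16 bits read back as 0x484b (ASCII 'H', 'K') when the
         * mailbox holds a valid MAC address.
         */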
8238         if ((hi >> 16) == 0x484b) {
8239                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8240                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8241
8242                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8243                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8244                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8245                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8246                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8247         }
8248         /* Next, try NVRAM. */
8249         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8250                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8251                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8252                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8253                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8254                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8255                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8256                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8257                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8258         }
8259         /* Finally just fetch it out of the MAC control regs. */
8260         else {
8261                 hi = tr32(MAC_ADDR_0_HIGH);
8262                 lo = tr32(MAC_ADDR_0_LOW);
8263
8264                 dev->dev_addr[5] = lo & 0xff;
8265                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8266                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8267                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8268                 dev->dev_addr[1] = hi & 0xff;
8269                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8270         }
8271
8272         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8273 #ifdef CONFIG_SPARC64
8274                 if (!tg3_get_default_macaddr_sparc(tp))
8275                         return 0;
8276 #endif
8277                 return -EINVAL;
8278         }
8279         return 0;
8280 }
8281
8282 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8283 {
8284         struct tg3_internal_buffer_desc test_desc;
8285         u32 sram_dma_descs;
8286         int i, ret;
8287
8288         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8289
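        /* Clear the DMA completion FIFOs and the read/write DMA status,
         * and make sure the buffer manager and FTQ reset bits are cleared
         * before running the test transfer.
         */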
8290         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8291         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8292         tw32(RDMAC_STATUS, 0);
8293         tw32(WDMAC_STATUS, 0);
8294
8295         tw32(BUFMGR_MODE, 0);
8296         tw32(FTQ_RESET, 0);
8297
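        /* Build an internal DMA descriptor: host buffer address on one
         * side, a NIC SRAM buffer at offset 0x2100 on the other, plus the
         * transfer length.
         */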
8298         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8299         test_desc.addr_lo = buf_dma & 0xffffffff;
8300         test_desc.nic_mbuf = 0x00002100;
8301         test_desc.len = size;
8302
8303         /*
8304          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8305          * the *second* time the tg3 driver was getting loaded after an
8306          * initial scan.
8307          *
8308          * Broadcom tells me:
8309          *   ...the DMA engine is connected to the GRC block and a DMA
8310          *   reset may affect the GRC block in some unpredictable way...
8311          *   The behavior of resets to individual blocks has not been tested.
8312          *
8313          * Broadcom noted the GRC reset will also reset all sub-components.
8314          */
8315         if (to_device) {
8316                 test_desc.cqid_sqid = (13 << 8) | 2;
8317
8318                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8319                 udelay(40);
8320         } else {
8321                 test_desc.cqid_sqid = (16 << 8) | 7;
8322
8323                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8324                 udelay(40);
8325         }
8326         test_desc.flags = 0x00000005;
8327
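        /* Copy the descriptor into NIC SRAM one 32-bit word at a time,
         * going through the PCI memory window registers.
         */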
8328         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8329                 u32 val;
8330
8331                 val = *(((u32 *)&test_desc) + i);
8332                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8333                                        sram_dma_descs + (i * sizeof(u32)));
8334                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8335         }
8336         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8337
8338         if (to_device) {
8339                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8340         } else {
8341                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8342         }
8343
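        /* Poll the completion FIFO (up to 40 times, 100us apart) until the
         * descriptor address shows up, indicating the DMA has finished.
         */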
8344         ret = -ENODEV;
8345         for (i = 0; i < 40; i++) {
8346                 u32 val;
8347
8348                 if (to_device)
8349                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8350                 else
8351                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8352                 if ((val & 0xffff) == sram_dma_descs) {
8353                         ret = 0;
8354                         break;
8355                 }
8356
8357                 udelay(100);
8358         }
8359
8360         return ret;
8361 }
8362
8363 #define TEST_BUFFER_SIZE        0x400
8364
8365 static int __devinit tg3_test_dma(struct tg3 *tp)
8366 {
8367         dma_addr_t buf_dma;
8368         u32 *buf;
8369         int ret;
8370
8371         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8372         if (!buf) {
8373                 ret = -ENOMEM;
8374                 goto out_nofree;
8375         }
8376
8377         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8378                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8379
8380 #ifndef CONFIG_X86
8381         {
8382                 u8 byte;
8383                 int cacheline_size;
8384                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8385
8386                 if (byte == 0)
8387                         cacheline_size = 1024;
8388                 else
8389                         cacheline_size = (int) byte * 4;
8390
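                /* Pick a DMA write boundary that matches the host cacheline
                 * size and the bus type (plain PCI, PCI-X or PCI Express).
                 */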
8391                 switch (cacheline_size) {
8392                 case 16:
8393                 case 32:
8394                 case 64:
8395                 case 128:
8396                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8397                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8398                                 tp->dma_rwctrl |=
8399                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8400                                 break;
8401                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8402                                 tp->dma_rwctrl &=
8403                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8404                                 tp->dma_rwctrl |=
8405                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8406                                 break;
8407                         }
8408                         /* fallthrough */
8409                 case 256:
8410                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8411                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8412                                 tp->dma_rwctrl |=
8413                                         DMA_RWCTRL_WRITE_BNDRY_256;
8414                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8415                                 tp->dma_rwctrl |=
8416                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8417                 }
8418         }
8419 #endif
8420
8421         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8422                 /* DMA read watermark not used on PCIE */
8423                 tp->dma_rwctrl |= 0x00180000;
8424         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8425                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8426                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8427                         tp->dma_rwctrl |= 0x003f0000;
8428                 else
8429                         tp->dma_rwctrl |= 0x003f000f;
8430         } else {
8431                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8432                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8433                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8434
8435                         if (ccval == 0x6 || ccval == 0x7)
8436                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8437
8438                         /* Set bit 23 to re-enable the PCIX hw bug fix */
8439                         tp->dma_rwctrl |= 0x009f0000;
8440                 } else {
8441                         tp->dma_rwctrl |= 0x001b000f;
8442                 }
8443         }
8444
8445         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8446             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8447                 tp->dma_rwctrl &= 0xfffffff0;
8448
8449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8451                 /* Remove this if it causes problems for some boards. */
8452                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8453
8454                 /* On 5700/5701 chips, we need to set this bit.
8455                  * Otherwise the chip will issue cacheline transactions
8456                  * to streamable DMA memory with not all the byte
8457                  * enables turned on.  This is an error on several
8458                  * RISC PCI controllers, in particular sparc64.
8459                  *
8460                  * On 5703/5704 chips, this bit has been reassigned
8461                  * a different meaning.  In particular, it is used
8462                  * on those chips to enable a PCI-X workaround.
8463                  */
8464                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8465         }
8466
8467         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8468
8469 #if 0
8470         /* Unneeded, already done by tg3_get_invariants.  */
8471         tg3_switch_clocks(tp);
8472 #endif
8473
8474         ret = 0;
8475         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8476             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8477                 goto out;
8478
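        /* Fill the buffer with a known pattern, DMA it to the chip and back,
         * and verify the result.  If corruption shows up with write
         * boundaries disabled, retry with a 16-byte write boundary before
         * giving up.
         */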
8479         while (1) {
8480                 u32 *p = buf, i;
8481
8482                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8483                         p[i] = i;
8484
8485                 /* Send the buffer to the chip. */
8486                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8487                 if (ret) {
8488                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8489                         break;
8490                 }
8491
8492 #if 0
8493                 /* validate data reached card RAM correctly. */
8494                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8495                         u32 val;
8496                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8497                         if (le32_to_cpu(val) != p[i]) {
8498                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8499                                 /* ret = -ENODEV here? */
8500                         }
8501                         p[i] = 0;
8502                 }
8503 #endif
8504                 /* Now read it back. */
8505                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8506                 if (ret) {
8507                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8508
8509                         break;
8510                 }
8511
8512                 /* Verify it. */
8513                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8514                         if (p[i] == i)
8515                                 continue;
8516
8517                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8518                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8519                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8520                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8521                                 break;
8522                         } else {
8523                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8524                                 ret = -ENODEV;
8525                                 goto out;
8526                         }
8527                 }
8528
8529                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8530                         /* Success. */
8531                         ret = 0;
8532                         break;
8533                 }
8534         }
8535
8536 out:
8537         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8538 out_nofree:
8539         return ret;
8540 }
8541
8542 static void __devinit tg3_init_link_config(struct tg3 *tp)
8543 {
8544         tp->link_config.advertising =
8545                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8546                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8547                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8548                  ADVERTISED_Autoneg | ADVERTISED_MII);
8549         tp->link_config.speed = SPEED_INVALID;
8550         tp->link_config.duplex = DUPLEX_INVALID;
8551         tp->link_config.autoneg = AUTONEG_ENABLE;
8552         netif_carrier_off(tp->dev);
8553         tp->link_config.active_speed = SPEED_INVALID;
8554         tp->link_config.active_duplex = DUPLEX_INVALID;
8555         tp->link_config.phy_is_low_power = 0;
8556         tp->link_config.orig_speed = SPEED_INVALID;
8557         tp->link_config.orig_duplex = DUPLEX_INVALID;
8558         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8559 }
8560
8561 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8562 {
8563         tp->bufmgr_config.mbuf_read_dma_low_water =
8564                 DEFAULT_MB_RDMA_LOW_WATER;
8565         tp->bufmgr_config.mbuf_mac_rx_low_water =
8566                 DEFAULT_MB_MACRX_LOW_WATER;
8567         tp->bufmgr_config.mbuf_high_water =
8568                 DEFAULT_MB_HIGH_WATER;
8569
8570         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8571                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8572         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8573                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8574         tp->bufmgr_config.mbuf_high_water_jumbo =
8575                 DEFAULT_MB_HIGH_WATER_JUMBO;
8576
8577         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8578         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8579 }
8580
8581 static char * __devinit tg3_phy_string(struct tg3 *tp)
8582 {
8583         switch (tp->phy_id & PHY_ID_MASK) {
8584         case PHY_ID_BCM5400:    return "5400";
8585         case PHY_ID_BCM5401:    return "5401";
8586         case PHY_ID_BCM5411:    return "5411";
8587         case PHY_ID_BCM5701:    return "5701";
8588         case PHY_ID_BCM5703:    return "5703";
8589         case PHY_ID_BCM5704:    return "5704";
8590         case PHY_ID_BCM5705:    return "5705";
8591         case PHY_ID_BCM5750:    return "5750";
8592         case PHY_ID_BCM8002:    return "8002/serdes";
8593         case 0:                 return "serdes";
8594         default:                return "unknown";
8595         }
8596 }
8597
8598 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8599 {
8600         struct pci_dev *peer;
8601         unsigned int func, devnr = tp->pdev->devfn & ~7;
8602
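        /* The 5704 is a dual-port device; its peer is simply the other PCI
         * function in the same slot (devfn with the function bits cleared),
         * so scan all eight functions looking for it.
         */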
8603         for (func = 0; func < 8; func++) {
8604                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8605                 if (peer && peer != tp->pdev)
8606                         break;
8607                 pci_dev_put(peer);
8608         }
8609         if (!peer || peer == tp->pdev)
8610                 BUG();
8611
8612         /*
8613          * We don't need to keep the refcount elevated; there's no way
8614          * to remove one half of this device without removing the other
8615          */
8616         pci_dev_put(peer);
8617
8618         return peer;
8619 }
8620
8621 static int __devinit tg3_init_one(struct pci_dev *pdev,
8622                                   const struct pci_device_id *ent)
8623 {
8624         static int tg3_version_printed = 0;
8625         unsigned long tg3reg_base, tg3reg_len;
8626         struct net_device *dev;
8627         struct tg3 *tp;
8628         int i, err, pci_using_dac, pm_cap;
8629
8630         if (tg3_version_printed++ == 0)
8631                 printk(KERN_INFO "%s", version);
8632
8633         err = pci_enable_device(pdev);
8634         if (err) {
8635                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8636                        "aborting.\n");
8637                 return err;
8638         }
8639
8640         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8641                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8642                        "base address, aborting.\n");
8643                 err = -ENODEV;
8644                 goto err_out_disable_pdev;
8645         }
8646
8647         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8648         if (err) {
8649                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8650                        "aborting.\n");
8651                 goto err_out_disable_pdev;
8652         }
8653
8654         pci_set_master(pdev);
8655
8656         /* Find power-management capability. */
8657         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8658         if (pm_cap == 0) {
8659                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8660                        "aborting.\n");
8661                 err = -EIO;
8662                 goto err_out_free_res;
8663         }
8664
8665         /* Configure DMA attributes. */
8666         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8667         if (!err) {
8668                 pci_using_dac = 1;
8669                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8670                 if (err < 0) {
8671                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8672                                "for consistent allocations\n");
8673                         goto err_out_free_res;
8674                 }
8675         } else {
8676                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8677                 if (err) {
8678                         printk(KERN_ERR PFX "No usable DMA configuration, "
8679                                "aborting.\n");
8680                         goto err_out_free_res;
8681                 }
8682                 pci_using_dac = 0;
8683         }
8684
8685         tg3reg_base = pci_resource_start(pdev, 0);
8686         tg3reg_len = pci_resource_len(pdev, 0);
8687
8688         dev = alloc_etherdev(sizeof(*tp));
8689         if (!dev) {
8690                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8691                 err = -ENOMEM;
8692                 goto err_out_free_res;
8693         }
8694
8695         SET_MODULE_OWNER(dev);
8696         SET_NETDEV_DEV(dev, &pdev->dev);
8697
8698         if (pci_using_dac)
8699                 dev->features |= NETIF_F_HIGHDMA;
8700         dev->features |= NETIF_F_LLTX;
8701 #if TG3_VLAN_TAG_USED
8702         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8703         dev->vlan_rx_register = tg3_vlan_rx_register;
8704         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8705 #endif
8706
8707         tp = netdev_priv(dev);
8708         tp->pdev = pdev;
8709         tp->dev = dev;
8710         tp->pm_cap = pm_cap;
8711         tp->mac_mode = TG3_DEF_MAC_MODE;
8712         tp->rx_mode = TG3_DEF_RX_MODE;
8713         tp->tx_mode = TG3_DEF_TX_MODE;
8714         tp->mi_mode = MAC_MI_MODE_BASE;
8715         if (tg3_debug > 0)
8716                 tp->msg_enable = tg3_debug;
8717         else
8718                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8719
8720         /* The word/byte swap controls here control register access byte
8721          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8722          * setting below.
8723          */
8724         tp->misc_host_ctrl =
8725                 MISC_HOST_CTRL_MASK_PCI_INT |
8726                 MISC_HOST_CTRL_WORD_SWAP |
8727                 MISC_HOST_CTRL_INDIR_ACCESS |
8728                 MISC_HOST_CTRL_PCISTATE_RW;
8729
8730         /* The NONFRM (non-frame) byte/word swap controls take effect
8731          * on descriptor entries, anything which isn't packet data.
8732          *
8733          * The StrongARM chips on the board (one for tx, one for rx)
8734          * are running in big-endian mode.
8735          */
8736         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8737                         GRC_MODE_WSWAP_NONFRM_DATA);
8738 #ifdef __BIG_ENDIAN
8739         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8740 #endif
8741         spin_lock_init(&tp->lock);
8742         spin_lock_init(&tp->tx_lock);
8743         spin_lock_init(&tp->indirect_lock);
8744         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8745
8746         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8747         if (!tp->regs) {
8748                 printk(KERN_ERR PFX "Cannot map device registers, "
8749                        "aborting.\n");
8750                 err = -ENOMEM;
8751                 goto err_out_free_dev;
8752         }
8753
8754         tg3_init_link_config(tp);
8755
8756         tg3_init_bufmgr_config(tp);
8757
8758         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8759         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8760         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8761
8762         dev->open = tg3_open;
8763         dev->stop = tg3_close;
8764         dev->get_stats = tg3_get_stats;
8765         dev->set_multicast_list = tg3_set_rx_mode;
8766         dev->set_mac_address = tg3_set_mac_addr;
8767         dev->do_ioctl = tg3_ioctl;
8768         dev->tx_timeout = tg3_tx_timeout;
8769         dev->poll = tg3_poll;
8770         dev->ethtool_ops = &tg3_ethtool_ops;
8771         dev->weight = 64;
8772         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8773         dev->change_mtu = tg3_change_mtu;
8774         dev->irq = pdev->irq;
8775 #ifdef CONFIG_NET_POLL_CONTROLLER
8776         dev->poll_controller = tg3_poll_controller;
8777 #endif
8778
8779         err = tg3_get_invariants(tp);
8780         if (err) {
8781                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8782                        "aborting.\n");
8783                 goto err_out_iounmap;
8784         }
8785
8786         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8787             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8788                 tp->bufmgr_config.mbuf_read_dma_low_water =
8789                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8790                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8791                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8792                 tp->bufmgr_config.mbuf_high_water =
8793                         DEFAULT_MB_HIGH_WATER_5705;
8794         }
8795
8796 #if TG3_TSO_SUPPORT != 0
8797         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8798                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8799         }
8800         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8801             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8802             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8803             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8804                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8805         } else {
8806                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8807         }
8808
8809         /* TSO is off by default, user can enable using ethtool.  */
8810 #if 0
8811         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8812                 dev->features |= NETIF_F_TSO;
8813 #endif
8814
8815 #endif
8816
8817         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8818             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8819             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8820                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8821                 tp->rx_pending = 63;
8822         }
8823
8824         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8825                 tp->pdev_peer = tg3_find_5704_peer(tp);
8826
8827         err = tg3_get_device_address(tp);
8828         if (err) {
8829                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8830                        "aborting.\n");
8831                 goto err_out_iounmap;
8832         }
8833
8834         /*
8835          * Reset the chip in case a UNDI or EFI boot driver did not shut
8836          * it down; otherwise the DMA self test will enable WDMAC and
8837          * we'll see (spurious) pending DMA on the PCI bus at that point.
8838          */
8839         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8840             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8841                 pci_save_state(tp->pdev);
8842                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8843                 tg3_halt(tp);
8844         }
8845
8846         err = tg3_test_dma(tp);
8847         if (err) {
8848                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8849                 goto err_out_iounmap;
8850         }
8851
8852         /* Tigon3 can do ipv4 only... and some chips have buggy
8853          * checksumming.
8854          */
8855         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8856                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8857                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8858         } else
8859                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8860
8861         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8862                 dev->features &= ~NETIF_F_HIGHDMA;
8863
8864         /* flow control autonegotiation is default behavior */
8865         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8866
8867         err = register_netdev(dev);
8868         if (err) {
8869                 printk(KERN_ERR PFX "Cannot register net device, "
8870                        "aborting.\n");
8871                 goto err_out_iounmap;
8872         }
8873
8874         pci_set_drvdata(pdev, dev);
8875
8876         /* Now that we have fully setup the chip, save away a snapshot
8877          * of the PCI config space.  We need to restore this after
8878          * GRC_MISC_CFG core clock resets and some resume events.
8879          */
8880         pci_save_state(tp->pdev);
8881
8882         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8883                dev->name,
8884                tp->board_part_number,
8885                tp->pci_chip_rev_id,
8886                tg3_phy_string(tp),
8887                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8888                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8889                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8890                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8891                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8892                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8893
8894         for (i = 0; i < 6; i++)
8895                 printk("%2.2x%c", dev->dev_addr[i],
8896                        i == 5 ? '\n' : ':');
8897
8898         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8899                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8900                "TSOcap[%d]\n",
8901                dev->name,
8902                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8903                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8904                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8905                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8906                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8907                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8908                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8909
8910         return 0;
8911
8912 err_out_iounmap:
8913         iounmap(tp->regs);
8914
8915 err_out_free_dev:
8916         free_netdev(dev);
8917
8918 err_out_free_res:
8919         pci_release_regions(pdev);
8920
8921 err_out_disable_pdev:
8922         pci_disable_device(pdev);
8923         pci_set_drvdata(pdev, NULL);
8924         return err;
8925 }
8926
8927 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8928 {
8929         struct net_device *dev = pci_get_drvdata(pdev);
8930
8931         if (dev) {
8932                 struct tg3 *tp = netdev_priv(dev);
8933
8934                 unregister_netdev(dev);
8935                 iounmap(tp->regs);
8936                 free_netdev(dev);
8937                 pci_release_regions(pdev);
8938                 pci_disable_device(pdev);
8939                 pci_set_drvdata(pdev, NULL);
8940         }
8941 }
8942
8943 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8944 {
8945         struct net_device *dev = pci_get_drvdata(pdev);
8946         struct tg3 *tp = netdev_priv(dev);
8947         int err;
8948
8949         if (!netif_running(dev))
8950                 return 0;
8951
8952         tg3_netif_stop(tp);
8953
8954         del_timer_sync(&tp->timer);
8955
8956         spin_lock_irq(&tp->lock);
8957         spin_lock(&tp->tx_lock);
8958         tg3_disable_ints(tp);
8959         spin_unlock(&tp->tx_lock);
8960         spin_unlock_irq(&tp->lock);
8961
8962         netif_device_detach(dev);
8963
8964         spin_lock_irq(&tp->lock);
8965         spin_lock(&tp->tx_lock);
8966         tg3_halt(tp);
8967         spin_unlock(&tp->tx_lock);
8968         spin_unlock_irq(&tp->lock);
8969
8970         err = tg3_set_power_state(tp, state);
8971         if (err) {
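                /* Entering the low-power state failed; bring the device
                 * back up so it remains usable.
                 */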
8972                 spin_lock_irq(&tp->lock);
8973                 spin_lock(&tp->tx_lock);
8974
8975                 tg3_init_hw(tp);
8976
8977                 tp->timer.expires = jiffies + tp->timer_offset;
8978                 add_timer(&tp->timer);
8979
8980                 netif_device_attach(dev);
8981                 tg3_netif_start(tp);
8982
8983                 spin_unlock(&tp->tx_lock);
8984                 spin_unlock_irq(&tp->lock);
8985         }
8986
8987         return err;
8988 }
8989
8990 static int tg3_resume(struct pci_dev *pdev)
8991 {
8992         struct net_device *dev = pci_get_drvdata(pdev);
8993         struct tg3 *tp = netdev_priv(dev);
8994         int err;
8995
8996         if (!netif_running(dev))
8997                 return 0;
8998
8999         pci_restore_state(tp->pdev);
9000
9001         err = tg3_set_power_state(tp, 0);
9002         if (err)
9003                 return err;
9004
9005         netif_device_attach(dev);
9006
9007         spin_lock_irq(&tp->lock);
9008         spin_lock(&tp->tx_lock);
9009
9010         tg3_init_hw(tp);
9011
9012         tp->timer.expires = jiffies + tp->timer_offset;
9013         add_timer(&tp->timer);
9014
9015         tg3_enable_ints(tp);
9016
9017         tg3_netif_start(tp);
9018
9019         spin_unlock(&tp->tx_lock);
9020         spin_unlock_irq(&tp->lock);
9021
9022         return 0;
9023 }
9024
9025 static struct pci_driver tg3_driver = {
9026         .name           = DRV_MODULE_NAME,
9027         .id_table       = tg3_pci_tbl,
9028         .probe          = tg3_init_one,
9029         .remove         = __devexit_p(tg3_remove_one),
9030         .suspend        = tg3_suspend,
9031         .resume         = tg3_resume
9032 };
9033
9034 static int __init tg3_init(void)
9035 {
9036         return pci_module_init(&tg3_driver);
9037 }
9038
9039 static void __exit tg3_cleanup(void)
9040 {
9041         pci_unregister_driver(&tg3_driver);
9042 }
9043
9044 module_init(tg3_init);
9045 module_exit(tg3_cleanup);