1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  */
7
8 #include <linux/config.h>
9
10 #include <linux/module.h>
11
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/compiler.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/ioport.h>
19 #include <linux/pci.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/mii.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ip.h>
27 #include <linux/tcp.h>
28 #include <linux/workqueue.h>
29
30 #include <net/checksum.h>
31
32 #include <asm/system.h>
33 #include <asm/io.h>
34 #include <asm/byteorder.h>
35 #include <asm/uaccess.h>
36
37 #ifdef CONFIG_SPARC64
38 #include <asm/idprom.h>
39 #include <asm/oplib.h>
40 #include <asm/pbm.h>
41 #endif
42
43 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
44 #define TG3_VLAN_TAG_USED 1
45 #else
46 #define TG3_VLAN_TAG_USED 0
47 #endif
48
49 #ifdef NETIF_F_TSO
50 #define TG3_TSO_SUPPORT 1
51 #else
52 #define TG3_TSO_SUPPORT 0
53 #endif
54
55 #include "tg3.h"
56
57 #define DRV_MODULE_NAME         "tg3"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "3.5"
60 #define DRV_MODULE_RELDATE      "May 25, 2004"
61
62 #define TG3_DEF_MAC_MODE        0
63 #define TG3_DEF_RX_MODE         0
64 #define TG3_DEF_TX_MODE         0
65 #define TG3_DEF_MSG_ENABLE        \
66         (NETIF_MSG_DRV          | \
67          NETIF_MSG_PROBE        | \
68          NETIF_MSG_LINK         | \
69          NETIF_MSG_TIMER        | \
70          NETIF_MSG_IFDOWN       | \
71          NETIF_MSG_IFUP         | \
72          NETIF_MSG_RX_ERR       | \
73          NETIF_MSG_TX_ERR)
74
75 /* length of time before we decide the hardware is borked,
76  * and dev->tx_timeout() should be called to fix the problem
77  */
78 #define TG3_TX_TIMEOUT                  (5 * HZ)
79
80 /* hardware minimum and maximum for a single frame's data payload */
81 #define TG3_MIN_MTU                     60
82 #define TG3_MAX_MTU(tp) \
83         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
84           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
85
86 /* These numbers seem to be hard coded in the NIC firmware somehow.
87  * You can't change the ring sizes, but you can change where you place
88  * them in the NIC onboard memory.
89  */
90 #define TG3_RX_RING_SIZE                512
91 #define TG3_DEF_RX_RING_PENDING         200
92 #define TG3_RX_JUMBO_RING_SIZE          256
93 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
94
95 /* Do not place these n-ring-entry values into the tp struct itself;
96  * we really want to expose these constants to GCC so that modulo et
97  * al. operations are done with shifts and masks instead of with
98  * hw multiply/modulo instructions.  Another solution would be to
99  * replace things like '% foo' with '& (foo - 1)'.
100  */
101 #define TG3_RX_RCB_RING_SIZE(tp)        \
102         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
103           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
104          512 : 1024)
105
106 #define TG3_TX_RING_SIZE                512
107 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
108
109 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
110                                  TG3_RX_RING_SIZE)
111 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_JUMBO_RING_SIZE)
113 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
114                                    TG3_RX_RCB_RING_SIZE(tp))
115 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
116                                  TG3_TX_RING_SIZE)
117 #define TX_RING_GAP(TP) \
118         (TG3_TX_RING_SIZE - (TP)->tx_pending)
119 #define TX_BUFFS_AVAIL(TP)                                              \
120         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
121           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
122           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
123 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
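/* Worked example (illustrative, not driver logic): because TG3_TX_RING_SIZE
 * is a power of two (512), the '& (TG3_TX_RING_SIZE - 1)' in NEXT_TX() is
 * equivalent to '% TG3_TX_RING_SIZE' but compiles to a single AND, which is
 * why the ring sizes are kept as compile-time constants (see the comment
 * above TG3_RX_RCB_RING_SIZE).  With the default tx_pending of 511:
 *
 *	NEXT_TX(511) == (511 + 1) & 511 == 0                    (index wraps)
 *	tx_cons = 5,  tx_prod = 510:  TX_BUFFS_AVAIL == 5 + 511 - 510 == 6
 *	tx_cons = 10, tx_prod = 3:    TX_BUFFS_AVAIL == 10 - 3 - TX_RING_GAP == 6
 */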
124
125 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
126 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
127
128 /* minimum number of free TX descriptors required to wake up TX process */
129 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
130
131 /* number of ETHTOOL_GSTATS u64's */
132 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
133
134 static char version[] __devinitdata =
135         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
136
137 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
138 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
139 MODULE_LICENSE("GPL");
140 MODULE_PARM(tg3_debug, "i");
141 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
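/* Example (illustrative): the debug mask can be overridden at load time,
 * e.g. "modprobe tg3 tg3_debug=63"; the bits correspond to the NETIF_MSG_*
 * flags that make up TG3_DEF_MSG_ENABLE above.
 */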
144
145 static struct pci_device_id tg3_pci_tbl[] = {
146         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
147           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
148         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
149           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
150         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
151           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
152         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
153           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { 0, }
213 };
214
215 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
217 static struct {
218         char string[ETH_GSTRING_LEN];
219 } ethtool_stats_keys[TG3_NUM_STATS] = {
220         { "rx_octets" },
221         { "rx_fragments" },
222         { "rx_ucast_packets" },
223         { "rx_mcast_packets" },
224         { "rx_bcast_packets" },
225         { "rx_fcs_errors" },
226         { "rx_align_errors" },
227         { "rx_xon_pause_rcvd" },
228         { "rx_xoff_pause_rcvd" },
229         { "rx_mac_ctrl_rcvd" },
230         { "rx_xoff_entered" },
231         { "rx_frame_too_long_errors" },
232         { "rx_jabbers" },
233         { "rx_undersize_packets" },
234         { "rx_in_length_errors" },
235         { "rx_out_length_errors" },
236         { "rx_64_or_less_octet_packets" },
237         { "rx_65_to_127_octet_packets" },
238         { "rx_128_to_255_octet_packets" },
239         { "rx_256_to_511_octet_packets" },
240         { "rx_512_to_1023_octet_packets" },
241         { "rx_1024_to_1522_octet_packets" },
242         { "rx_1523_to_2047_octet_packets" },
243         { "rx_2048_to_4095_octet_packets" },
244         { "rx_4096_to_8191_octet_packets" },
245         { "rx_8192_to_9022_octet_packets" },
246
247         { "tx_octets" },
248         { "tx_collisions" },
249
250         { "tx_xon_sent" },
251         { "tx_xoff_sent" },
252         { "tx_flow_control" },
253         { "tx_mac_errors" },
254         { "tx_single_collisions" },
255         { "tx_mult_collisions" },
256         { "tx_deferred" },
257         { "tx_excessive_collisions" },
258         { "tx_late_collisions" },
259         { "tx_collide_2times" },
260         { "tx_collide_3times" },
261         { "tx_collide_4times" },
262         { "tx_collide_5times" },
263         { "tx_collide_6times" },
264         { "tx_collide_7times" },
265         { "tx_collide_8times" },
266         { "tx_collide_9times" },
267         { "tx_collide_10times" },
268         { "tx_collide_11times" },
269         { "tx_collide_12times" },
270         { "tx_collide_13times" },
271         { "tx_collide_14times" },
272         { "tx_collide_15times" },
273         { "tx_ucast_packets" },
274         { "tx_mcast_packets" },
275         { "tx_bcast_packets" },
276         { "tx_carrier_sense_errors" },
277         { "tx_discards" },
278         { "tx_errors" },
279
280         { "dma_writeq_full" },
281         { "dma_write_prioq_full" },
282         { "rxbds_empty" },
283         { "rx_discards" },
284         { "rx_errors" },
285         { "rx_threshold_hit" },
286
287         { "dma_readq_full" },
288         { "dma_read_prioq_full" },
289         { "tx_comp_queue_full" },
290
291         { "ring_set_send_prod_index" },
292         { "ring_status_update" },
293         { "nic_irqs" },
294         { "nic_avoided_irqs" },
295         { "nic_tx_threshold_hit" }
296 };
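/* These strings are what userspace sees from "ethtool -S <dev>".
 * TG3_NUM_STATS is sized from struct tg3_ethtool_stats, so this table is
 * expected to stay in one-to-one correspondence with that structure's
 * u64 counters.
 */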
297
298 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
299 {
300         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
301                 unsigned long flags;
302
303                 spin_lock_irqsave(&tp->indirect_lock, flags);
304                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
305                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
306                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
307         } else {
308                 writel(val, tp->regs + off);
309                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
310                         readl(tp->regs + off);
311         }
312 }
313
314 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
315 {
316         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
317                 unsigned long flags;
318
319                 spin_lock_irqsave(&tp->indirect_lock, flags);
320                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
321                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
322                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
323         } else {
324                 unsigned long dest = tp->regs + off;
325                 writel(val, dest);
326                 readl(dest);    /* always flush PCI write */
327         }
328 }
329
330 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
331 {
332         unsigned long mbox = tp->regs + off;
333         writel(val, mbox);
334         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
335                 readl(mbox);
336 }
337
338 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
339 {
340         unsigned long mbox = tp->regs + off;
341         writel(val, mbox);
342         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
343                 writel(val, mbox);
344         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
345                 readl(mbox);
346 }
347
348 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
349 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
350 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
351
352 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
353 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
354 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
355 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
356 #define tr32(reg)               readl(tp->regs + (reg))
357 #define tr16(reg)               readw(tp->regs + (reg))
358 #define tr8(reg)                readb(tp->regs + (reg))
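/* Usage sketch (illustrative only; the register names below all appear
 * elsewhere in this file):
 *
 *	tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
 *	tw32_f(MAC_MODE, mac_mode);
 *	udelay(40);
 *	val = tr32(MAC_STATUS);
 *
 * tw32() transparently falls back to indirect config-space accesses when
 * TG3_FLAG_PCIX_TARGET_HWBUG is set, while tw32_f() always reads the
 * register back so the posted PCI write reaches the chip before the
 * following udelay().
 */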
359
360 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
361 {
362         unsigned long flags;
363
364         spin_lock_irqsave(&tp->indirect_lock, flags);
365         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
366         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
367
368         /* Always leave this as zero. */
369         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
370         spin_unlock_irqrestore(&tp->indirect_lock, flags);
371 }
372
373 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
374 {
375         unsigned long flags;
376
377         spin_lock_irqsave(&tp->indirect_lock, flags);
378         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
379         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
380
381         /* Always leave this as zero. */
382         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
383         spin_unlock_irqrestore(&tp->indirect_lock, flags);
384 }
385
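/* Illustrative example: both helpers above tunnel through the PCI memory
 * window, so reading one 32-bit word of NIC-local memory at some offset
 * OFF (a made-up name here) looks like
 *
 *	u32 val;
 *	tg3_read_mem(tp, OFF, &val);
 *
 * The window base is parked back at zero afterwards, and indirect_lock
 * keeps a concurrent tg3_read_mem()/tg3_write_mem() from moving the
 * window mid-access.
 */
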
386 static void tg3_disable_ints(struct tg3 *tp)
387 {
388         tw32(TG3PCI_MISC_HOST_CTRL,
389              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
390         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
391         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
392 }
393
394 static inline void tg3_cond_int(struct tg3 *tp)
395 {
396         if (tp->hw_status->status & SD_STATUS_UPDATED)
397                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
398 }
399
400 static void tg3_enable_ints(struct tg3 *tp)
401 {
402         tw32(TG3PCI_MISC_HOST_CTRL,
403              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
404         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
405         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
406
407         tg3_cond_int(tp);
408 }
409
410 static inline void tg3_netif_stop(struct tg3 *tp)
411 {
412         netif_poll_disable(tp->dev);
413         netif_tx_disable(tp->dev);
414 }
415
416 static inline void tg3_netif_start(struct tg3 *tp)
417 {
418         netif_wake_queue(tp->dev);
419         /* NOTE: unconditional netif_wake_queue is only appropriate
420          * so long as all callers are assured to have free tx slots
421          * (such as after tg3_init_hw)
422          */
423         netif_poll_enable(tp->dev);
424         tg3_cond_int(tp);
425 }
426
427 static void tg3_switch_clocks(struct tg3 *tp)
428 {
429         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
430         u32 orig_clock_ctrl;
431
432         orig_clock_ctrl = clock_ctrl;
433         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
434                        CLOCK_CTRL_CLKRUN_OENABLE |
435                        0x1f);
436         tp->pci_clock_ctrl = clock_ctrl;
437
438         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
439             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
440             (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
441                 tw32_f(TG3PCI_CLOCK_CTRL,
442                      clock_ctrl |
443                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
444                 udelay(40);
445                 tw32_f(TG3PCI_CLOCK_CTRL,
446                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
447                 udelay(40);
448         }
449         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
450         udelay(40);
451 }
452
453 #define PHY_BUSY_LOOPS  5000
454
455 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
456 {
457         u32 frame_val;
458         int loops, ret;
459
460         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
461                 tw32_f(MAC_MI_MODE,
462                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
463                 udelay(80);
464         }
465
466         *val = 0xffffffff;
467
468         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
469                       MI_COM_PHY_ADDR_MASK);
470         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
471                       MI_COM_REG_ADDR_MASK);
472         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
473         
474         tw32_f(MAC_MI_COM, frame_val);
475
476         loops = PHY_BUSY_LOOPS;
477         while (loops-- > 0) {
478                 udelay(10);
479                 frame_val = tr32(MAC_MI_COM);
480
481                 if ((frame_val & MI_COM_BUSY) == 0) {
482                         udelay(5);
483                         frame_val = tr32(MAC_MI_COM);
484                         break;
485                 }
486         }
487
488         ret = -EBUSY;
489         if (loops > 0) {
490                 *val = frame_val & MI_COM_DATA_MASK;
491                 ret = 0;
492         }
493
494         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
495                 tw32_f(MAC_MI_MODE, tp->mi_mode);
496                 udelay(80);
497         }
498
499         return ret;
500 }
501
502 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
503 {
504         u32 frame_val;
505         int loops, ret;
506
507         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
508                 tw32_f(MAC_MI_MODE,
509                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
510                 udelay(80);
511         }
512
513         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
514                       MI_COM_PHY_ADDR_MASK);
515         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
516                       MI_COM_REG_ADDR_MASK);
517         frame_val |= (val & MI_COM_DATA_MASK);
518         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
519         
520         tw32_f(MAC_MI_COM, frame_val);
521
522         loops = PHY_BUSY_LOOPS;
523         while (loops-- > 0) {
524                 udelay(10);
525                 frame_val = tr32(MAC_MI_COM);
526                 if ((frame_val & MI_COM_BUSY) == 0) {
527                         udelay(5);
528                         frame_val = tr32(MAC_MI_COM);
529                         break;
530                 }
531         }
532
533         ret = -EBUSY;
534         if (loops > 0)
535                 ret = 0;
536
537         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
538                 tw32_f(MAC_MI_MODE, tp->mi_mode);
539                 udelay(80);
540         }
541
542         return ret;
543 }
544
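/* Illustrative example: MII registers are normally updated with a
 * read-modify-write built from the two helpers above, e.g. setting bit 14
 * of the auxiliary control register (the same sequence appears in
 * tg3_phy_reset() below):
 *
 *	u32 reg;
 *	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
 *	tg3_readphy(tp, MII_TG3_AUX_CTRL, &reg);
 *	tg3_writephy(tp, MII_TG3_AUX_CTRL, reg | 0x4000);
 *
 * Both helpers return -EBUSY if MI_COM_BUSY never clears, so callers that
 * care should check the return value.
 */
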
545 static void tg3_phy_set_wirespeed(struct tg3 *tp)
546 {
547         u32 val;
548
549         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
550                 return;
551
552         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
553         tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
554         tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
555 }
556
557 static int tg3_bmcr_reset(struct tg3 *tp)
558 {
559         u32 phy_control;
560         int limit, err;
561
562         /* OK, reset it, and poll the BMCR_RESET bit until it
563          * clears or we time out.
564          */
565         phy_control = BMCR_RESET;
566         err = tg3_writephy(tp, MII_BMCR, phy_control);
567         if (err != 0)
568                 return -EBUSY;
569
570         limit = 5000;
571         while (limit--) {
572                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
573                 if (err != 0)
574                         return -EBUSY;
575
576                 if ((phy_control & BMCR_RESET) == 0) {
577                         udelay(40);
578                         break;
579                 }
580                 udelay(10);
581         }
582         if (limit <= 0)
583                 return -EBUSY;
584
585         return 0;
586 }
587
588 static int tg3_wait_macro_done(struct tg3 *tp)
589 {
590         int limit = 100;
591
592         while (limit--) {
593                 u32 tmp32;
594
595                 tg3_readphy(tp, 0x16, &tmp32);
596                 if ((tmp32 & 0x1000) == 0)
597                         break;
598         }
599         if (limit <= 0)
600                 return -EBUSY;
601
602         return 0;
603 }
604
605 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
606 {
607         static const u32 test_pat[4][6] = {
608         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
609         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
610         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
611         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
612         };
613         int chan;
614
615         for (chan = 0; chan < 4; chan++) {
616                 int i;
617
618                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
619                              (chan * 0x2000) | 0x0200);
620                 tg3_writephy(tp, 0x16, 0x0002);
621
622                 for (i = 0; i < 6; i++)
623                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
624                                      test_pat[chan][i]);
625
626                 tg3_writephy(tp, 0x16, 0x0202);
627                 if (tg3_wait_macro_done(tp)) {
628                         *resetp = 1;
629                         return -EBUSY;
630                 }
631
632                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
633                              (chan * 0x2000) | 0x0200);
634                 tg3_writephy(tp, 0x16, 0x0082);
635                 if (tg3_wait_macro_done(tp)) {
636                         *resetp = 1;
637                         return -EBUSY;
638                 }
639
640                 tg3_writephy(tp, 0x16, 0x0802);
641                 if (tg3_wait_macro_done(tp)) {
642                         *resetp = 1;
643                         return -EBUSY;
644                 }
645
646                 for (i = 0; i < 6; i += 2) {
647                         u32 low, high;
648
649                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
650                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
651                         if (tg3_wait_macro_done(tp)) {
652                                 *resetp = 1;
653                                 return -EBUSY;
654                         }
655                         low &= 0x7fff;
656                         high &= 0x000f;
657                         if (low != test_pat[chan][i] ||
658                             high != test_pat[chan][i+1]) {
659                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
660                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
661                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
662
663                                 return -EBUSY;
664                         }
665                 }
666         }
667
668         return 0;
669 }
670
671 static int tg3_phy_reset_chanpat(struct tg3 *tp)
672 {
673         int chan;
674
675         for (chan = 0; chan < 4; chan++) {
676                 int i;
677
678                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
679                              (chan * 0x2000) | 0x0200);
680                 tg3_writephy(tp, 0x16, 0x0002);
681                 for (i = 0; i < 6; i++)
682                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
683                 tg3_writephy(tp, 0x16, 0x0202);
684                 if (tg3_wait_macro_done(tp))
685                         return -EBUSY;
686         }
687
688         return 0;
689 }
690
691 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
692 {
693         u32 reg32, phy9_orig;
694         int retries, do_phy_reset, err;
695
696         retries = 10;
697         do_phy_reset = 1;
698         do {
699                 if (do_phy_reset) {
700                         err = tg3_bmcr_reset(tp);
701                         if (err)
702                                 return err;
703                         do_phy_reset = 0;
704                 }
705
706                 /* Disable transmitter and interrupt.  */
707                 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
708                 reg32 |= 0x3000;
709                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
710
711                 /* Set full-duplex, 1000 mbps.  */
712                 tg3_writephy(tp, MII_BMCR,
713                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
714
715                 /* Set to master mode.  */
716                 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
717                 tg3_writephy(tp, MII_TG3_CTRL,
718                              (MII_TG3_CTRL_AS_MASTER |
719                               MII_TG3_CTRL_ENABLE_AS_MASTER));
720
721                 /* Enable SM_DSP_CLOCK and 6dB.  */
722                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
723
724                 /* Block the PHY control access.  */
725                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
726                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
727
728                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
729                 if (!err)
730                         break;
731         } while (--retries);
732
733         err = tg3_phy_reset_chanpat(tp);
734         if (err)
735                 return err;
736
737         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
738         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
739
740         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
741         tg3_writephy(tp, 0x16, 0x0000);
742
743         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
744             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
745                 /* Set Extended packet length bit for jumbo frames */
746                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
747         }
748         else {
749                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
750         }
751
752         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
753
754         tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
755         reg32 &= ~0x3000;
756         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
757
758         return err;
759 }
760
761 /* Reset the tigon3 PHY.  Callers such as tg3_setup_copper_phy() decide
762  * when a reset is needed (e.g. on link loss or an explicit force_reset).
763  */
764 static int tg3_phy_reset(struct tg3 *tp)
765 {
766         u32 phy_status;
767         int err;
768
769         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
770         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
771         if (err != 0)
772                 return -EBUSY;
773
774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
775             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
776             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
777                 err = tg3_phy_reset_5703_4_5(tp);
778                 if (err)
779                         return err;
780                 goto out;
781         }
782
783         err = tg3_bmcr_reset(tp);
784         if (err)
785                 return err;
786
787 out:
788         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
789                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
790                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
791                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
792                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
793                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
794                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
795         }
796         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
797                 tg3_writephy(tp, 0x1c, 0x8d68);
798                 tg3_writephy(tp, 0x1c, 0x8d68);
799         }
800         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
801                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
802                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
803                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
804                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
805                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
806                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
807                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
808                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
809         }
810         /* Set Extended packet length bit (bit 14) on all chips that
811          * support jumbo frames. */
812         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
813                 /* Cannot do read-modify-write on 5401 */
814                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
815         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
816                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
817                 u32 phy_reg;
818
819                 /* Set bit 14 with read-modify-write to preserve other bits */
820                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
821                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
822                 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
823         }
824         tg3_phy_set_wirespeed(tp);
825         return 0;
826 }
827
828 static void tg3_frob_aux_power(struct tg3 *tp)
829 {
830         struct tg3 *tp_peer = tp;
831
832         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
833                 return;
834
835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
836                 tp_peer = pci_get_drvdata(tp->pdev_peer);
837                 if (!tp_peer)
838                         BUG();
839         }
840
841
842         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
843             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
845                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
846                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
847                              (GRC_LCLCTRL_GPIO_OE0 |
848                               GRC_LCLCTRL_GPIO_OE1 |
849                               GRC_LCLCTRL_GPIO_OE2 |
850                               GRC_LCLCTRL_GPIO_OUTPUT0 |
851                               GRC_LCLCTRL_GPIO_OUTPUT1));
852                         udelay(100);
853                 } else {
854                         if (tp_peer != tp &&
855                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
856                                 return;
857
858                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
859                              (GRC_LCLCTRL_GPIO_OE0 |
860                               GRC_LCLCTRL_GPIO_OE1 |
861                               GRC_LCLCTRL_GPIO_OE2 |
862                               GRC_LCLCTRL_GPIO_OUTPUT1 |
863                               GRC_LCLCTRL_GPIO_OUTPUT2));
864                         udelay(100);
865
866                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
867                              (GRC_LCLCTRL_GPIO_OE0 |
868                               GRC_LCLCTRL_GPIO_OE1 |
869                               GRC_LCLCTRL_GPIO_OE2 |
870                               GRC_LCLCTRL_GPIO_OUTPUT0 |
871                               GRC_LCLCTRL_GPIO_OUTPUT1 |
872                               GRC_LCLCTRL_GPIO_OUTPUT2));
873                         udelay(100);
874
875                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
876                              (GRC_LCLCTRL_GPIO_OE0 |
877                               GRC_LCLCTRL_GPIO_OE1 |
878                               GRC_LCLCTRL_GPIO_OE2 |
879                               GRC_LCLCTRL_GPIO_OUTPUT0 |
880                               GRC_LCLCTRL_GPIO_OUTPUT1));
881                         udelay(100);
882                 }
883         } else {
884                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
885                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
886                         if (tp_peer != tp &&
887                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
888                                 return;
889
890                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
891                              (GRC_LCLCTRL_GPIO_OE1 |
892                               GRC_LCLCTRL_GPIO_OUTPUT1));
893                         udelay(100);
894
895                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
896                              (GRC_LCLCTRL_GPIO_OE1));
897                         udelay(100);
898
899                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
900                              (GRC_LCLCTRL_GPIO_OE1 |
901                               GRC_LCLCTRL_GPIO_OUTPUT1));
902                         udelay(100);
903                 }
904         }
905 }
906
907 static int tg3_setup_phy(struct tg3 *, int);
908
909 #define RESET_KIND_SHUTDOWN     0
910 #define RESET_KIND_INIT         1
911 #define RESET_KIND_SUSPEND      2
912
913 static void tg3_write_sig_post_reset(struct tg3 *, int);
914
915 static int tg3_set_power_state(struct tg3 *tp, int state)
916 {
917         u32 misc_host_ctrl;
918         u16 power_control, power_caps;
919         int pm = tp->pm_cap;
920
921         /* Make sure register accesses (indirect or otherwise)
922          * will function correctly.
923          */
924         pci_write_config_dword(tp->pdev,
925                                TG3PCI_MISC_HOST_CTRL,
926                                tp->misc_host_ctrl);
927
928         pci_read_config_word(tp->pdev,
929                              pm + PCI_PM_CTRL,
930                              &power_control);
931         power_control |= PCI_PM_CTRL_PME_STATUS;
932         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
933         switch (state) {
934         case 0:
935                 power_control |= 0;
936                 pci_write_config_word(tp->pdev,
937                                       pm + PCI_PM_CTRL,
938                                       power_control);
939                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
940                 udelay(100);
941
942                 return 0;
943
944         case 1:
945                 power_control |= 1;
946                 break;
947
948         case 2:
949                 power_control |= 2;
950                 break;
951
952         case 3:
953                 power_control |= 3;
954                 break;
955
956         default:
957                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
958                        "requested.\n",
959                        tp->dev->name, state);
960                 return -EINVAL;
961         };
962
963         power_control |= PCI_PM_CTRL_PME_ENABLE;
964
965         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
966         tw32(TG3PCI_MISC_HOST_CTRL,
967              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
968
969         if (tp->link_config.phy_is_low_power == 0) {
970                 tp->link_config.phy_is_low_power = 1;
971                 tp->link_config.orig_speed = tp->link_config.speed;
972                 tp->link_config.orig_duplex = tp->link_config.duplex;
973                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
974         }
975
976         if (tp->phy_id != PHY_ID_SERDES) {
977                 tp->link_config.speed = SPEED_10;
978                 tp->link_config.duplex = DUPLEX_HALF;
979                 tp->link_config.autoneg = AUTONEG_ENABLE;
980                 tg3_setup_phy(tp, 0);
981         }
982
983         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
984
985         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
986                 u32 mac_mode;
987
988                 if (tp->phy_id != PHY_ID_SERDES) {
989                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
990                         udelay(40);
991
992                         mac_mode = MAC_MODE_PORT_MODE_MII;
993
994                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
995                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
996                                 mac_mode |= MAC_MODE_LINK_POLARITY;
997                 } else {
998                         mac_mode = MAC_MODE_PORT_MODE_TBI;
999                 }
1000
1001                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1002                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1003
1004                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1005                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1006                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1007
1008                 tw32_f(MAC_MODE, mac_mode);
1009                 udelay(100);
1010
1011                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1012                 udelay(10);
1013         }
1014
1015         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1016             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1017              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1018                 u32 base_val;
1019
1020                 base_val = tp->pci_clock_ctrl;
1021                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1022                              CLOCK_CTRL_TXCLK_DISABLE);
1023
1024                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1025                      CLOCK_CTRL_ALTCLK |
1026                      CLOCK_CTRL_PWRDOWN_PLL133);
1027                 udelay(40);
1028         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1029                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1030                 u32 newbits1, newbits2;
1031
1032                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1033                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1034                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1035                                     CLOCK_CTRL_TXCLK_DISABLE |
1036                                     CLOCK_CTRL_ALTCLK);
1037                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1038                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
1039                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1040                         newbits1 = CLOCK_CTRL_625_CORE;
1041                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1042                 } else {
1043                         newbits1 = CLOCK_CTRL_ALTCLK;
1044                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1045                 }
1046
1047                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1048                 udelay(40);
1049
1050                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1051                 udelay(40);
1052
1053                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
1054                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
1055                         u32 newbits3;
1056
1057                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1058                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1059                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1060                                             CLOCK_CTRL_TXCLK_DISABLE |
1061                                             CLOCK_CTRL_44MHZ_CORE);
1062                         } else {
1063                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1064                         }
1065
1066                         tw32_f(TG3PCI_CLOCK_CTRL,
1067                                          tp->pci_clock_ctrl | newbits3);
1068                         udelay(40);
1069                 }
1070         }
1071
1072         tg3_frob_aux_power(tp);
1073
1074         /* Finally, set the new power state. */
1075         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1076
1077         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1078
1079         return 0;
1080 }
1081
1082 static void tg3_link_report(struct tg3 *tp)
1083 {
1084         if (!netif_carrier_ok(tp->dev)) {
1085                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1086         } else {
1087                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1088                        tp->dev->name,
1089                        (tp->link_config.active_speed == SPEED_1000 ?
1090                         1000 :
1091                         (tp->link_config.active_speed == SPEED_100 ?
1092                          100 : 10)),
1093                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1094                         "full" : "half"));
1095
1096                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1097                        "%s for RX.\n",
1098                        tp->dev->name,
1099                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1100                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1101         }
1102 }
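/* Example console output from the above (illustrative, assuming eth0):
 *
 *	tg3: eth0: Link is up at 1000 Mbps, full duplex.
 *	tg3: eth0: Flow control is on for TX and on for RX.
 */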
1103
1104 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1105 {
1106         u32 new_tg3_flags = 0;
1107         u32 old_rx_mode = tp->rx_mode;
1108         u32 old_tx_mode = tp->tx_mode;
1109
1110         if (local_adv & ADVERTISE_PAUSE_CAP) {
1111                 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1112                         if (remote_adv & LPA_PAUSE_CAP)
1113                                 new_tg3_flags |=
1114                                         (TG3_FLAG_RX_PAUSE |
1115                                          TG3_FLAG_TX_PAUSE);
1116                         else if (remote_adv & LPA_PAUSE_ASYM)
1117                                 new_tg3_flags |=
1118                                         (TG3_FLAG_RX_PAUSE);
1119                 } else {
1120                         if (remote_adv & LPA_PAUSE_CAP)
1121                                 new_tg3_flags |=
1122                                         (TG3_FLAG_RX_PAUSE |
1123                                          TG3_FLAG_TX_PAUSE);
1124                 }
1125         } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1126                 if ((remote_adv & LPA_PAUSE_CAP) &&
1127                     (remote_adv & LPA_PAUSE_ASYM))
1128                         new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1129         }
1130
1131         tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1132         tp->tg3_flags |= new_tg3_flags;
1133
1134         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1135                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1136         else
1137                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1138
1139         if (old_rx_mode != tp->rx_mode) {
1140                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1141         }
1142         
1143         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1144                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1145         else
1146                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1147
1148         if (old_tx_mode != tp->tx_mode) {
1149                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1150         }
1151 }
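/* Summary of the pause resolution implemented above ("cap"/"asym" are
 * ADVERTISE_PAUSE_CAP/_ASYM locally and LPA_PAUSE_CAP/_ASYM from the
 * link partner):
 *
 *	local cap        + partner cap                -> RX and TX pause
 *	local cap + asym + partner asym (without cap) -> RX pause only
 *	local asym only  + partner cap + asym         -> TX pause only
 *	anything else                                 -> no pause frames
 */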
1152
1153 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1154 {
1155         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1156         case MII_TG3_AUX_STAT_10HALF:
1157                 *speed = SPEED_10;
1158                 *duplex = DUPLEX_HALF;
1159                 break;
1160
1161         case MII_TG3_AUX_STAT_10FULL:
1162                 *speed = SPEED_10;
1163                 *duplex = DUPLEX_FULL;
1164                 break;
1165
1166         case MII_TG3_AUX_STAT_100HALF:
1167                 *speed = SPEED_100;
1168                 *duplex = DUPLEX_HALF;
1169                 break;
1170
1171         case MII_TG3_AUX_STAT_100FULL:
1172                 *speed = SPEED_100;
1173                 *duplex = DUPLEX_FULL;
1174                 break;
1175
1176         case MII_TG3_AUX_STAT_1000HALF:
1177                 *speed = SPEED_1000;
1178                 *duplex = DUPLEX_HALF;
1179                 break;
1180
1181         case MII_TG3_AUX_STAT_1000FULL:
1182                 *speed = SPEED_1000;
1183                 *duplex = DUPLEX_FULL;
1184                 break;
1185
1186         default:
1187                 *speed = SPEED_INVALID;
1188                 *duplex = DUPLEX_INVALID;
1189                 break;
1190         };
1191 }
1192
1193 static int tg3_phy_copper_begin(struct tg3 *tp)
1194 {
1195         u32 new_adv;
1196         int i;
1197
1198         if (tp->link_config.phy_is_low_power) {
1199                 /* Entering low power mode.  Disable gigabit and
1200                  * 100baseT advertisements.
1201                  */
1202                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1203
1204                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1205                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1206                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1207                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1208
1209                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1210         } else if (tp->link_config.speed == SPEED_INVALID) {
1211                 tp->link_config.advertising =
1212                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1213                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1214                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1215                          ADVERTISED_Autoneg | ADVERTISED_MII);
1216
1217                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1218                         tp->link_config.advertising &=
1219                                 ~(ADVERTISED_1000baseT_Half |
1220                                   ADVERTISED_1000baseT_Full);
1221
1222                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1223                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1224                         new_adv |= ADVERTISE_10HALF;
1225                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1226                         new_adv |= ADVERTISE_10FULL;
1227                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1228                         new_adv |= ADVERTISE_100HALF;
1229                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1230                         new_adv |= ADVERTISE_100FULL;
1231                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1232
1233                 if (tp->link_config.advertising &
1234                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1235                         new_adv = 0;
1236                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1237                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1238                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1239                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1240                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1241                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1242                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1243                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1244                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1245                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1246                 } else {
1247                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1248                 }
1249         } else {
1250                 /* Asking for a specific link mode. */
1251                 if (tp->link_config.speed == SPEED_1000) {
1252                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1253                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1254
1255                         if (tp->link_config.duplex == DUPLEX_FULL)
1256                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1257                         else
1258                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1259                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1260                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1261                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1262                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1263                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1264                 } else {
1265                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1266
1267                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1268                         if (tp->link_config.speed == SPEED_100) {
1269                                 if (tp->link_config.duplex == DUPLEX_FULL)
1270                                         new_adv |= ADVERTISE_100FULL;
1271                                 else
1272                                         new_adv |= ADVERTISE_100HALF;
1273                         } else {
1274                                 if (tp->link_config.duplex == DUPLEX_FULL)
1275                                         new_adv |= ADVERTISE_10FULL;
1276                                 else
1277                                         new_adv |= ADVERTISE_10HALF;
1278                         }
1279                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1280                 }
1281         }
1282
1283         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1284             tp->link_config.speed != SPEED_INVALID) {
1285                 u32 bmcr, orig_bmcr;
1286
1287                 tp->link_config.active_speed = tp->link_config.speed;
1288                 tp->link_config.active_duplex = tp->link_config.duplex;
1289
1290                 bmcr = 0;
1291                 switch (tp->link_config.speed) {
1292                 default:
1293                 case SPEED_10:
1294                         break;
1295
1296                 case SPEED_100:
1297                         bmcr |= BMCR_SPEED100;
1298                         break;
1299
1300                 case SPEED_1000:
1301                         bmcr |= TG3_BMCR_SPEED1000;
1302                         break;
1303                 };
1304
1305                 if (tp->link_config.duplex == DUPLEX_FULL)
1306                         bmcr |= BMCR_FULLDPLX;
1307
1308                 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1309                 if (bmcr != orig_bmcr) {
1310                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1311                         for (i = 0; i < 1500; i++) {
1312                                 u32 tmp;
1313
1314                                 udelay(10);
1315                                 tg3_readphy(tp, MII_BMSR, &tmp);
1316                                 tg3_readphy(tp, MII_BMSR, &tmp);
1317                                 if (!(tmp & BMSR_LSTATUS)) {
1318                                         udelay(40);
1319                                         break;
1320                                 }
1321                         }
1322                         tg3_writephy(tp, MII_BMCR, bmcr);
1323                         udelay(40);
1324                 }
1325         } else {
1326                 tg3_writephy(tp, MII_BMCR,
1327                              BMCR_ANENABLE | BMCR_ANRESTART);
1328         }
1329
1330         return 0;
1331 }
1332
1333 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1334 {
1335         int err;
1336
1337         /* Turn off tap power management. */
1338         /* Set Extended packet length bit */
1339         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1340
1341         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1342         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1343
1344         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1345         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1346
1347         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1348         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1349
1350         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1351         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1352
1353         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1354         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1355
1356         udelay(40);
1357
1358         return err;
1359 }
1360
1361 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1362 {
1363         u32 adv_reg, all_mask;
1364
1365         tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
1366         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1367                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1368         if ((adv_reg & all_mask) != all_mask)
1369                 return 0;
1370         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1371                 u32 tg3_ctrl;
1372
1373                 tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
1374                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1375                             MII_TG3_CTRL_ADV_1000_FULL);
1376                 if ((tg3_ctrl & all_mask) != all_mask)
1377                         return 0;
1378         }
1379         return 1;
1380 }
1381
1382 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1383 {
1384         int current_link_up;
1385         u32 bmsr, dummy;
1386         u16 current_speed;
1387         u8 current_duplex;
1388         int i, err;
1389
1390         tw32(MAC_EVENT, 0);
1391
1392         tw32_f(MAC_STATUS,
1393              (MAC_STATUS_SYNC_CHANGED |
1394               MAC_STATUS_CFG_CHANGED |
1395               MAC_STATUS_MI_COMPLETION |
1396               MAC_STATUS_LNKSTATE_CHANGED));
1397         udelay(40);
1398
1399         tp->mi_mode = MAC_MI_MODE_BASE;
1400         tw32_f(MAC_MI_MODE, tp->mi_mode);
1401         udelay(80);
1402
1403         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1404
1405         /* Some third-party PHYs need to be reset on link going
1406          * down.
1407          */
1408         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1409              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1410              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1411             netif_carrier_ok(tp->dev)) {
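                /* BMSR_LSTATUS is latched low, so the first read returns the
                 * latched state and the second read the current link state.
                 */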
1412                 tg3_readphy(tp, MII_BMSR, &bmsr);
1413                 tg3_readphy(tp, MII_BMSR, &bmsr);
1414                 if (!(bmsr & BMSR_LSTATUS))
1415                         force_reset = 1;
1416         }
1417         if (force_reset)
1418                 tg3_phy_reset(tp);
1419
1420         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1421                 tg3_readphy(tp, MII_BMSR, &bmsr);
1422                 tg3_readphy(tp, MII_BMSR, &bmsr);
1423
1424                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1425                         bmsr = 0;
1426
1427                 if (!(bmsr & BMSR_LSTATUS)) {
1428                         err = tg3_init_5401phy_dsp(tp);
1429                         if (err)
1430                                 return err;
1431
1432                         tg3_readphy(tp, MII_BMSR, &bmsr);
1433                         for (i = 0; i < 1000; i++) {
1434                                 udelay(10);
1435                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1436                                 if (bmsr & BMSR_LSTATUS) {
1437                                         udelay(40);
1438                                         break;
1439                                 }
1440                         }
1441
1442                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1443                             !(bmsr & BMSR_LSTATUS) &&
1444                             tp->link_config.active_speed == SPEED_1000) {
1445                                 err = tg3_phy_reset(tp);
1446                                 if (!err)
1447                                         err = tg3_init_5401phy_dsp(tp);
1448                                 if (err)
1449                                         return err;
1450                         }
1451                 }
1452         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1453                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1454                 /* 5701 {A0,B0} CRC bug workaround */
1455                 tg3_writephy(tp, 0x15, 0x0a75);
1456                 tg3_writephy(tp, 0x1c, 0x8c68);
1457                 tg3_writephy(tp, 0x1c, 0x8d68);
1458                 tg3_writephy(tp, 0x1c, 0x8c68);
1459         }
1460
1461         /* Clear pending interrupts... */
1462         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1463         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1464
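        /* Program the PHY interrupt mask: when link changes are delivered via
         * the MI interrupt, leave only the link-change event unmasked;
         * otherwise mask all PHY interrupt sources (set bits appear to mean
         * "masked" here).
         */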
1465         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1466                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1467         else
1468                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1469
1470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1472                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1473                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1474                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1475                 else
1476                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1477         }
1478
1479         current_link_up = 0;
1480         current_speed = SPEED_INVALID;
1481         current_duplex = DUPLEX_INVALID;
1482
1483         bmsr = 0;
1484         for (i = 0; i < 100; i++) {
1485                 tg3_readphy(tp, MII_BMSR, &bmsr);
1486                 tg3_readphy(tp, MII_BMSR, &bmsr);
1487                 if (bmsr & BMSR_LSTATUS)
1488                         break;
1489                 udelay(40);
1490         }
1491
1492         if (bmsr & BMSR_LSTATUS) {
1493                 u32 aux_stat, bmcr;
1494
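                /* Poll up to ~20ms (2000 * 10us) for a non-zero AUX status,
                 * which encodes the PHY's resolved speed and duplex.
                 */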
1495                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1496                 for (i = 0; i < 2000; i++) {
1497                         udelay(10);
1498                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1499                         if (aux_stat)
1500                                 break;
1501                 }
1502
1503                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1504                                              &current_speed,
1505                                              &current_duplex);
1506
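                /* Retry for up to ~2ms (200 * 10us) until BMCR returns
                 * something other than an apparently bogus 0 or 0x7fff.
                 */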
1507                 bmcr = 0;
1508                 for (i = 0; i < 200; i++) {
1509                         tg3_readphy(tp, MII_BMCR, &bmcr);
1510                         tg3_readphy(tp, MII_BMCR, &bmcr);
1511                         if (bmcr && bmcr != 0x7fff)
1512                                 break;
1513                         udelay(10);
1514                 }
1515
1516                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1517                         if (bmcr & BMCR_ANENABLE) {
1518                                 current_link_up = 1;
1519
1520                                 /* Force autoneg restart if we are exiting
1521                                  * low power mode.
1522                                  */
1523                                 if (!tg3_copper_is_advertising_all(tp))
1524                                         current_link_up = 0;
1525                         } else {
1526                                 current_link_up = 0;
1527                         }
1528                 } else {
1529                         if (!(bmcr & BMCR_ANENABLE) &&
1530                             tp->link_config.speed == current_speed &&
1531                             tp->link_config.duplex == current_duplex) {
1532                                 current_link_up = 1;
1533                         } else {
1534                                 current_link_up = 0;
1535                         }
1536                 }
1537
1538                 tp->link_config.active_speed = current_speed;
1539                 tp->link_config.active_duplex = current_duplex;
1540         }
1541
1542         if (current_link_up == 1 &&
1543             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1544             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1545                 u32 local_adv, remote_adv;
1546
1547                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1548                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1549
1550                 tg3_readphy(tp, MII_LPA, &remote_adv);
1551                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1552
1553                 /* If we are not advertising full pause capability,
1554                  * something is wrong.  Bring the link down and reconfigure.
1555                  */
1556                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1557                         current_link_up = 0;
1558                 } else {
1559                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1560                 }
1561         }
1562
1563         if (current_link_up == 0) {
1564                 u32 tmp;
1565
1566                 tg3_phy_copper_begin(tp);
1567
1568                 tg3_readphy(tp, MII_BMSR, &tmp);
1569                 tg3_readphy(tp, MII_BMSR, &tmp);
1570                 if (tmp & BMSR_LSTATUS)
1571                         current_link_up = 1;
1572         }
1573
1574         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1575         if (current_link_up == 1) {
1576                 if (tp->link_config.active_speed == SPEED_100 ||
1577                     tp->link_config.active_speed == SPEED_10)
1578                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1579                 else
1580                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1581         } else
1582                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1583
1584         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1585         if (tp->link_config.active_duplex == DUPLEX_HALF)
1586                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1587
1588         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1590                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1591                     (current_link_up == 1 &&
1592                      tp->link_config.active_speed == SPEED_10))
1593                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1594         } else {
1595                 if (current_link_up == 1)
1596                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1597         }
1598
1599         /* ??? Without this setting Netgear GA302T PHY does not
1600          * ??? send/receive packets...
1601          */
1602         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1603             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1604                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1605                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1606                 udelay(80);
1607         }
1608
1609         tw32_f(MAC_MODE, tp->mac_mode);
1610         udelay(40);
1611
1612         if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
1613                 /* Polled via timer. */
1614                 tw32_f(MAC_EVENT, 0);
1615         } else {
1616                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1617         }
1618         udelay(40);
1619
1620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1621             current_link_up == 1 &&
1622             tp->link_config.active_speed == SPEED_1000 &&
1623             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1624              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1625                 udelay(120);
1626                 tw32_f(MAC_STATUS,
1627                      (MAC_STATUS_SYNC_CHANGED |
1628                       MAC_STATUS_CFG_CHANGED));
1629                 udelay(40);
1630                 tg3_write_mem(tp,
1631                               NIC_SRAM_FIRMWARE_MBOX,
1632                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1633         }
1634
1635         if (current_link_up != netif_carrier_ok(tp->dev)) {
1636                 if (current_link_up)
1637                         netif_carrier_on(tp->dev);
1638                 else
1639                         netif_carrier_off(tp->dev);
1640                 tg3_link_report(tp);
1641         }
1642
1643         return 0;
1644 }
1645
1646 struct tg3_fiber_aneginfo {
1647         int state;
1648 #define ANEG_STATE_UNKNOWN              0
1649 #define ANEG_STATE_AN_ENABLE            1
1650 #define ANEG_STATE_RESTART_INIT         2
1651 #define ANEG_STATE_RESTART              3
1652 #define ANEG_STATE_DISABLE_LINK_OK      4
1653 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1654 #define ANEG_STATE_ABILITY_DETECT       6
1655 #define ANEG_STATE_ACK_DETECT_INIT      7
1656 #define ANEG_STATE_ACK_DETECT           8
1657 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1658 #define ANEG_STATE_COMPLETE_ACK         10
1659 #define ANEG_STATE_IDLE_DETECT_INIT     11
1660 #define ANEG_STATE_IDLE_DETECT          12
1661 #define ANEG_STATE_LINK_OK              13
1662 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1663 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1664
1665         u32 flags;
1666 #define MR_AN_ENABLE            0x00000001
1667 #define MR_RESTART_AN           0x00000002
1668 #define MR_AN_COMPLETE          0x00000004
1669 #define MR_PAGE_RX              0x00000008
1670 #define MR_NP_LOADED            0x00000010
1671 #define MR_TOGGLE_TX            0x00000020
1672 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1673 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1674 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1675 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1676 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1677 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1678 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1679 #define MR_TOGGLE_RX            0x00002000
1680 #define MR_NP_RX                0x00004000
1681
1682 #define MR_LINK_OK              0x80000000
1683
1684         unsigned long link_time, cur_time;
1685
1686         u32 ability_match_cfg;
1687         int ability_match_count;
1688
1689         char ability_match, idle_match, ack_match;
1690
1691         u32 txconfig, rxconfig;
1692 #define ANEG_CFG_NP             0x00000080
1693 #define ANEG_CFG_ACK            0x00000040
1694 #define ANEG_CFG_RF2            0x00000020
1695 #define ANEG_CFG_RF1            0x00000010
1696 #define ANEG_CFG_PS2            0x00000001
1697 #define ANEG_CFG_PS1            0x00008000
1698 #define ANEG_CFG_HD             0x00004000
1699 #define ANEG_CFG_FD             0x00002000
1700 #define ANEG_CFG_INVAL          0x00001f06
1701
1702 };
1703 #define ANEG_OK         0
1704 #define ANEG_DONE       1
1705 #define ANEG_TIMER_ENAB 2
1706 #define ANEG_FAILED     -1
1707
1708 #define ANEG_STATE_SETTLE_TIME  10000
1709
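/* Software implementation of the 1000BASE-X auto-negotiation arbitration
 * state machine (roughly the IEEE 802.3 Clause 37 flow).  The received
 * configuration word is sampled from MAC_RX_AUTO_NEG whenever MAC_STATUS
 * reports RCVD_CFG, and the word written to MAC_TX_AUTO_NEG is transmitted
 * while MAC_MODE_SEND_CONFIGS is set.  The caller advances ap->cur_time
 * roughly once per microsecond, so ANEG_STATE_SETTLE_TIME is about 10ms.
 */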
1710 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1711                                    struct tg3_fiber_aneginfo *ap)
1712 {
1713         unsigned long delta;
1714         u32 rx_cfg_reg;
1715         int ret;
1716
1717         if (ap->state == ANEG_STATE_UNKNOWN) {
1718                 ap->rxconfig = 0;
1719                 ap->link_time = 0;
1720                 ap->cur_time = 0;
1721                 ap->ability_match_cfg = 0;
1722                 ap->ability_match_count = 0;
1723                 ap->ability_match = 0;
1724                 ap->idle_match = 0;
1725                 ap->ack_match = 0;
1726         }
1727         ap->cur_time++;
1728
1729         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1730                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1731
1732                 if (rx_cfg_reg != ap->ability_match_cfg) {
1733                         ap->ability_match_cfg = rx_cfg_reg;
1734                         ap->ability_match = 0;
1735                         ap->ability_match_count = 0;
1736                 } else {
1737                         if (++ap->ability_match_count > 1) {
1738                                 ap->ability_match = 1;
1739                                 ap->ability_match_cfg = rx_cfg_reg;
1740                         }
1741                 }
1742                 if (rx_cfg_reg & ANEG_CFG_ACK)
1743                         ap->ack_match = 1;
1744                 else
1745                         ap->ack_match = 0;
1746
1747                 ap->idle_match = 0;
1748         } else {
1749                 ap->idle_match = 1;
1750                 ap->ability_match_cfg = 0;
1751                 ap->ability_match_count = 0;
1752                 ap->ability_match = 0;
1753                 ap->ack_match = 0;
1754
1755                 rx_cfg_reg = 0;
1756         }
1757
1758         ap->rxconfig = rx_cfg_reg;
1759         ret = ANEG_OK;
1760
1761         switch(ap->state) {
1762         case ANEG_STATE_UNKNOWN:
1763                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1764                         ap->state = ANEG_STATE_AN_ENABLE;
1765
1766                 /* fallthru */
1767         case ANEG_STATE_AN_ENABLE:
1768                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1769                 if (ap->flags & MR_AN_ENABLE) {
1770                         ap->link_time = 0;
1771                         ap->cur_time = 0;
1772                         ap->ability_match_cfg = 0;
1773                         ap->ability_match_count = 0;
1774                         ap->ability_match = 0;
1775                         ap->idle_match = 0;
1776                         ap->ack_match = 0;
1777
1778                         ap->state = ANEG_STATE_RESTART_INIT;
1779                 } else {
1780                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1781                 }
1782                 break;
1783
1784         case ANEG_STATE_RESTART_INIT:
1785                 ap->link_time = ap->cur_time;
1786                 ap->flags &= ~(MR_NP_LOADED);
1787                 ap->txconfig = 0;
1788                 tw32(MAC_TX_AUTO_NEG, 0);
1789                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1790                 tw32_f(MAC_MODE, tp->mac_mode);
1791                 udelay(40);
1792
1793                 ret = ANEG_TIMER_ENAB;
1794                 ap->state = ANEG_STATE_RESTART;
1795
1796                 /* fallthru */
1797         case ANEG_STATE_RESTART:
1798                 delta = ap->cur_time - ap->link_time;
1799                 if (delta > ANEG_STATE_SETTLE_TIME) {
1800                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1801                 } else {
1802                         ret = ANEG_TIMER_ENAB;
1803                 }
1804                 break;
1805
1806         case ANEG_STATE_DISABLE_LINK_OK:
1807                 ret = ANEG_DONE;
1808                 break;
1809
1810         case ANEG_STATE_ABILITY_DETECT_INIT:
1811                 ap->flags &= ~(MR_TOGGLE_TX);
1812                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1813                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1814                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1815                 tw32_f(MAC_MODE, tp->mac_mode);
1816                 udelay(40);
1817
1818                 ap->state = ANEG_STATE_ABILITY_DETECT;
1819                 break;
1820
1821         case ANEG_STATE_ABILITY_DETECT:
1822                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1823                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1824                 }
1825                 break;
1826
1827         case ANEG_STATE_ACK_DETECT_INIT:
1828                 ap->txconfig |= ANEG_CFG_ACK;
1829                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1830                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1831                 tw32_f(MAC_MODE, tp->mac_mode);
1832                 udelay(40);
1833
1834                 ap->state = ANEG_STATE_ACK_DETECT;
1835
1836                 /* fallthru */
1837         case ANEG_STATE_ACK_DETECT:
1838                 if (ap->ack_match != 0) {
1839                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1840                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1841                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1842                         } else {
1843                                 ap->state = ANEG_STATE_AN_ENABLE;
1844                         }
1845                 } else if (ap->ability_match != 0 &&
1846                            ap->rxconfig == 0) {
1847                         ap->state = ANEG_STATE_AN_ENABLE;
1848                 }
1849                 break;
1850
1851         case ANEG_STATE_COMPLETE_ACK_INIT:
1852                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1853                         ret = ANEG_FAILED;
1854                         break;
1855                 }
1856                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1857                                MR_LP_ADV_HALF_DUPLEX |
1858                                MR_LP_ADV_SYM_PAUSE |
1859                                MR_LP_ADV_ASYM_PAUSE |
1860                                MR_LP_ADV_REMOTE_FAULT1 |
1861                                MR_LP_ADV_REMOTE_FAULT2 |
1862                                MR_LP_ADV_NEXT_PAGE |
1863                                MR_TOGGLE_RX |
1864                                MR_NP_RX);
1865                 if (ap->rxconfig & ANEG_CFG_FD)
1866                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1867                 if (ap->rxconfig & ANEG_CFG_HD)
1868                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1869                 if (ap->rxconfig & ANEG_CFG_PS1)
1870                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1871                 if (ap->rxconfig & ANEG_CFG_PS2)
1872                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1873                 if (ap->rxconfig & ANEG_CFG_RF1)
1874                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1875                 if (ap->rxconfig & ANEG_CFG_RF2)
1876                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1877                 if (ap->rxconfig & ANEG_CFG_NP)
1878                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1879
1880                 ap->link_time = ap->cur_time;
1881
1882                 ap->flags ^= (MR_TOGGLE_TX);
1883                 if (ap->rxconfig & 0x0008)
1884                         ap->flags |= MR_TOGGLE_RX;
1885                 if (ap->rxconfig & ANEG_CFG_NP)
1886                         ap->flags |= MR_NP_RX;
1887                 ap->flags |= MR_PAGE_RX;
1888
1889                 ap->state = ANEG_STATE_COMPLETE_ACK;
1890                 ret = ANEG_TIMER_ENAB;
1891                 break;
1892
1893         case ANEG_STATE_COMPLETE_ACK:
1894                 if (ap->ability_match != 0 &&
1895                     ap->rxconfig == 0) {
1896                         ap->state = ANEG_STATE_AN_ENABLE;
1897                         break;
1898                 }
1899                 delta = ap->cur_time - ap->link_time;
1900                 if (delta > ANEG_STATE_SETTLE_TIME) {
1901                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1902                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1903                         } else {
1904                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1905                                     !(ap->flags & MR_NP_RX)) {
1906                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1907                                 } else {
1908                                         ret = ANEG_FAILED;
1909                                 }
1910                         }
1911                 }
1912                 break;
1913
1914         case ANEG_STATE_IDLE_DETECT_INIT:
1915                 ap->link_time = ap->cur_time;
1916                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1917                 tw32_f(MAC_MODE, tp->mac_mode);
1918                 udelay(40);
1919
1920                 ap->state = ANEG_STATE_IDLE_DETECT;
1921                 ret = ANEG_TIMER_ENAB;
1922                 break;
1923
1924         case ANEG_STATE_IDLE_DETECT:
1925                 if (ap->ability_match != 0 &&
1926                     ap->rxconfig == 0) {
1927                         ap->state = ANEG_STATE_AN_ENABLE;
1928                         break;
1929                 }
1930                 delta = ap->cur_time - ap->link_time;
1931                 if (delta > ANEG_STATE_SETTLE_TIME) {
1932                         /* XXX another gem from the Broadcom driver :( */
1933                         ap->state = ANEG_STATE_LINK_OK;
1934                 }
1935                 break;
1936
1937         case ANEG_STATE_LINK_OK:
1938                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1939                 ret = ANEG_DONE;
1940                 break;
1941
1942         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1943                 /* ??? unimplemented */
1944                 break;
1945
1946         case ANEG_STATE_NEXT_PAGE_WAIT:
1947                 /* ??? unimplemented */
1948                 break;
1949
1950         default:
1951                 ret = ANEG_FAILED;
1952                 break;
1953         };
1954
1955         return ret;
1956 }
1957
1958 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
1959 {
1960         u32 orig_pause_cfg;
1961         u16 orig_active_speed;
1962         u8 orig_active_duplex;
1963         int current_link_up;
1964         int i;
1965
1966         orig_pause_cfg =
1967                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1968                                   TG3_FLAG_TX_PAUSE));
1969         orig_active_speed = tp->link_config.active_speed;
1970         orig_active_duplex = tp->link_config.active_duplex;
1971
1972         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
1973         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1974         tw32_f(MAC_MODE, tp->mac_mode);
1975         udelay(40);
1976
1977         /* Reset when initializing for the first time or when we have a link. */
1978         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
1979             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1980                 /* Set PLL lock range. */
1981                 tg3_writephy(tp, 0x16, 0x8007);
1982
1983                 /* SW reset */
1984                 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
1985
1986                 /* Wait for reset to complete. */
1987                 /* XXX schedule_timeout() ... */
1988                 for (i = 0; i < 500; i++)
1989                         udelay(10);
1990
1991                 /* Config mode; select PMA/Ch 1 regs. */
1992                 tg3_writephy(tp, 0x10, 0x8411);
1993
1994                 /* Enable auto-lock and comdet, select txclk for tx. */
1995                 tg3_writephy(tp, 0x11, 0x0a10);
1996
1997                 tg3_writephy(tp, 0x18, 0x00a0);
1998                 tg3_writephy(tp, 0x16, 0x41ff);
1999
2000                 /* Assert and deassert POR. */
2001                 tg3_writephy(tp, 0x13, 0x0400);
2002                 udelay(40);
2003                 tg3_writephy(tp, 0x13, 0x0000);
2004
2005                 tg3_writephy(tp, 0x11, 0x0a50);
2006                 udelay(40);
2007                 tg3_writephy(tp, 0x11, 0x0a10);
2008
2009                 /* Wait for signal to stabilize */
2010                 /* XXX schedule_timeout() ... */
2011                 for (i = 0; i < 15000; i++)
2012                         udelay(10);
2013
2014                 /* Deselect the channel register so we can read the PHYID
2015                  * later.
2016                  */
2017                 tg3_writephy(tp, 0x10, 0x8011);
2018         }
2019
2020         /* Enable the link change interrupt unless we are polling the serdes.  */
2021         if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
2022                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2023         else
2024                 tw32_f(MAC_EVENT, 0);
2025         udelay(40);
2026
2027         current_link_up = 0;
2028         if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
2029                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2030                     !(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
2031                         struct tg3_fiber_aneginfo aninfo;
2032                         int status = ANEG_FAILED;
2033                         unsigned int tick;
2034                         u32 tmp;
2035
2036                         memset(&aninfo, 0, sizeof(aninfo));
2037                         aninfo.flags |= (MR_AN_ENABLE);
2038
2039                         tw32(MAC_TX_AUTO_NEG, 0);
2040
2041                         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2042                         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2043                         udelay(40);
2044
2045                         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2046                         udelay(40);
2047
2048                         aninfo.state = ANEG_STATE_UNKNOWN;
2049                         aninfo.cur_time = 0;
2050                         tick = 0;
2051                         while (++tick < 195000) {
2052                                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2053                                 if (status == ANEG_DONE ||
2054                                     status == ANEG_FAILED)
2055                                         break;
2056
2057                                 udelay(1);
2058                         }
2059
2060                         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2061                         tw32_f(MAC_MODE, tp->mac_mode);
2062                         udelay(40);
2063
2064                         if (status == ANEG_DONE &&
2065                             (aninfo.flags &
2066                              (MR_AN_COMPLETE | MR_LINK_OK |
2067                               MR_LP_ADV_FULL_DUPLEX))) {
2068                                 u32 local_adv, remote_adv;
2069
2070                                 local_adv = ADVERTISE_PAUSE_CAP;
2071                                 remote_adv = 0;
2072                                 if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
2073                                         remote_adv |= LPA_PAUSE_CAP;
2074                                 if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
2075                                         remote_adv |= LPA_PAUSE_ASYM;
2076
2077                                 tg3_setup_flow_control(tp, local_adv, remote_adv);
2078
2079                                 tp->tg3_flags |=
2080                                         TG3_FLAG_GOT_SERDES_FLOWCTL;
2081                                 current_link_up = 1;
2082                         }
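                        /* Flush any latched SYNC/CFG change events by writing
                         * the bits (apparently write-one-to-clear) until they
                         * read back clear.
                         */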
2083                         for (i = 0; i < 60; i++) {
2084                                 udelay(20);
2085                                 tw32_f(MAC_STATUS,
2086                                      (MAC_STATUS_SYNC_CHANGED |
2087                                       MAC_STATUS_CFG_CHANGED));
2088                                 udelay(40);
2089                                 if ((tr32(MAC_STATUS) &
2090                                      (MAC_STATUS_SYNC_CHANGED |
2091                                       MAC_STATUS_CFG_CHANGED)) == 0)
2092                                         break;
2093                         }
2094                         if (current_link_up == 0 &&
2095                             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2096                                 current_link_up = 1;
2097                         }
2098                 } else {
2099                         /* Forcing 1000FD link up. */
2100                         current_link_up = 1;
2101                 }
2102         }
2103
2104         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2105         tw32_f(MAC_MODE, tp->mac_mode);
2106         udelay(40);
2107
2108         tp->hw_status->status =
2109                 (SD_STATUS_UPDATED |
2110                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2111
2112         for (i = 0; i < 100; i++) {
2113                 udelay(20);
2114                 tw32_f(MAC_STATUS,
2115                      (MAC_STATUS_SYNC_CHANGED |
2116                       MAC_STATUS_CFG_CHANGED));
2117                 udelay(40);
2118                 if ((tr32(MAC_STATUS) &
2119                      (MAC_STATUS_SYNC_CHANGED |
2120                       MAC_STATUS_CFG_CHANGED)) == 0)
2121                         break;
2122         }
2123
2124         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
2125                 current_link_up = 0;
2126
2127         if (current_link_up == 1) {
2128                 tp->link_config.active_speed = SPEED_1000;
2129                 tp->link_config.active_duplex = DUPLEX_FULL;
2130                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2131                                     LED_CTRL_LNKLED_OVERRIDE |
2132                                     LED_CTRL_1000MBPS_ON));
2133         } else {
2134                 tp->link_config.active_speed = SPEED_INVALID;
2135                 tp->link_config.active_duplex = DUPLEX_INVALID;
2136                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2137                                     LED_CTRL_LNKLED_OVERRIDE |
2138                                     LED_CTRL_TRAFFIC_OVERRIDE));
2139         }
2140
2141         if (current_link_up != netif_carrier_ok(tp->dev)) {
2142                 if (current_link_up)
2143                         netif_carrier_on(tp->dev);
2144                 else
2145                         netif_carrier_off(tp->dev);
2146                 tg3_link_report(tp);
2147         } else {
2148                 u32 now_pause_cfg =
2149                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2150                                          TG3_FLAG_TX_PAUSE);
2151                 if (orig_pause_cfg != now_pause_cfg ||
2152                     orig_active_speed != tp->link_config.active_speed ||
2153                     orig_active_duplex != tp->link_config.active_duplex)
2154                         tg3_link_report(tp);
2155         }
2156
2157         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
2158                 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
2159                 udelay(40);
2160                 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
2161                         tw32_f(MAC_MODE, tp->mac_mode);
2162                         udelay(40);
2163                 }
2164         }
2165
2166         return 0;
2167 }
2168
2169 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2170 {
2171         int err;
2172
2173         if (tp->phy_id == PHY_ID_SERDES) {
2174                 err = tg3_setup_fiber_phy(tp, force_reset);
2175         } else {
2176                 err = tg3_setup_copper_phy(tp, force_reset);
2177         }
2178
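        /* Half-duplex gigabit uses a much larger slot time; all other
         * speed/duplex combinations use the standard IPG and slot time.
         */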
2179         if (tp->link_config.active_speed == SPEED_1000 &&
2180             tp->link_config.active_duplex == DUPLEX_HALF)
2181                 tw32(MAC_TX_LENGTHS,
2182                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2183                       (6 << TX_LENGTHS_IPG_SHIFT) |
2184                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2185         else
2186                 tw32(MAC_TX_LENGTHS,
2187                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2188                       (6 << TX_LENGTHS_IPG_SHIFT) |
2189                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2190
2191         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
2192             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
2193                 if (netif_carrier_ok(tp->dev)) {
2194                         tw32(HOSTCC_STAT_COAL_TICKS,
2195                              DEFAULT_STAT_COAL_TICKS);
2196                 } else {
2197                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2198                 }
2199         }
2200
2201         return err;
2202 }
2203
2204 /* Tigon3 never reports partial packet sends.  So we do not
2205  * need special logic to handle SKBs that have not had all
2206  * of their frags sent yet, like SunGEM does.
2207  */
2208 static void tg3_tx(struct tg3 *tp)
2209 {
2210         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2211         u32 sw_idx = tp->tx_cons;
2212
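        /* The chip's tx_consumer index in the status block tells us how far
         * it has progressed; every entry between our software consumer index
         * and it has been sent and can be unmapped and freed.
         */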
2213         while (sw_idx != hw_idx) {
2214                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2215                 struct sk_buff *skb = ri->skb;
2216                 int i;
2217
2218                 if (unlikely(skb == NULL))
2219                         BUG();
2220
2221                 pci_unmap_single(tp->pdev,
2222                                  pci_unmap_addr(ri, mapping),
2223                                  skb_headlen(skb),
2224                                  PCI_DMA_TODEVICE);
2225
2226                 ri->skb = NULL;
2227
2228                 sw_idx = NEXT_TX(sw_idx);
2229
2230                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2231                         if (unlikely(sw_idx == hw_idx))
2232                                 BUG();
2233
2234                         ri = &tp->tx_buffers[sw_idx];
2235                         if (unlikely(ri->skb != NULL))
2236                                 BUG();
2237
2238                         pci_unmap_page(tp->pdev,
2239                                        pci_unmap_addr(ri, mapping),
2240                                        skb_shinfo(skb)->frags[i].size,
2241                                        PCI_DMA_TODEVICE);
2242
2243                         sw_idx = NEXT_TX(sw_idx);
2244                 }
2245
2246                 dev_kfree_skb_irq(skb);
2247         }
2248
2249         tp->tx_cons = sw_idx;
2250
2251         if (netif_queue_stopped(tp->dev) &&
2252             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2253                 netif_wake_queue(tp->dev);
2254 }
2255
2256 /* Returns size of skb allocated or < 0 on error.
2257  *
2258  * We only need to fill in the address because the other members
2259  * of the RX descriptor are invariant, see tg3_init_rings.
2260  * of the RX descriptor are invariant; see tg3_init_rings.
2261  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2262  * posting buffers we only dirty the first cache line of the RX
2263  * descriptor (containing the address).  Whereas for the RX status
2264  * buffers the cpu only reads the last cacheline of the RX descriptor
2265  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2266  */
2267 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2268                             int src_idx, u32 dest_idx_unmasked)
2269 {
2270         struct tg3_rx_buffer_desc *desc;
2271         struct ring_info *map, *src_map;
2272         struct sk_buff *skb;
2273         dma_addr_t mapping;
2274         int skb_size, dest_idx;
2275
2276         src_map = NULL;
2277         switch (opaque_key) {
2278         case RXD_OPAQUE_RING_STD:
2279                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2280                 desc = &tp->rx_std[dest_idx];
2281                 map = &tp->rx_std_buffers[dest_idx];
2282                 if (src_idx >= 0)
2283                         src_map = &tp->rx_std_buffers[src_idx];
2284                 skb_size = RX_PKT_BUF_SZ;
2285                 break;
2286
2287         case RXD_OPAQUE_RING_JUMBO:
2288                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2289                 desc = &tp->rx_jumbo[dest_idx];
2290                 map = &tp->rx_jumbo_buffers[dest_idx];
2291                 if (src_idx >= 0)
2292                         src_map = &tp->rx_jumbo_buffers[src_idx];
2293                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2294                 break;
2295
2296         default:
2297                 return -EINVAL;
2298         };
2299
2300         /* Do not overwrite any of the map or rp information
2301          * until we are sure we can commit to a new buffer.
2302          *
2303          * Callers depend upon this behavior and assume that
2304          * we leave everything unchanged if we fail.
2305          */
2306         skb = dev_alloc_skb(skb_size);
2307         if (skb == NULL)
2308                 return -ENOMEM;
2309
2310         skb->dev = tp->dev;
2311         skb_reserve(skb, tp->rx_offset);
2312
2313         mapping = pci_map_single(tp->pdev, skb->data,
2314                                  skb_size - tp->rx_offset,
2315                                  PCI_DMA_FROMDEVICE);
2316
2317         map->skb = skb;
2318         pci_unmap_addr_set(map, mapping, mapping);
2319
2320         if (src_map != NULL)
2321                 src_map->skb = NULL;
2322
2323         desc->addr_hi = ((u64)mapping >> 32);
2324         desc->addr_lo = ((u64)mapping & 0xffffffff);
2325
2326         return skb_size;
2327 }
2328
2329 /* We only need to move over in the address because the other
2330  * members of the RX descriptor are invariant.  See notes above
2331  * tg3_alloc_rx_skb for full details.
2332  */
2333 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2334                            int src_idx, u32 dest_idx_unmasked)
2335 {
2336         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2337         struct ring_info *src_map, *dest_map;
2338         int dest_idx;
2339
2340         switch (opaque_key) {
2341         case RXD_OPAQUE_RING_STD:
2342                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2343                 dest_desc = &tp->rx_std[dest_idx];
2344                 dest_map = &tp->rx_std_buffers[dest_idx];
2345                 src_desc = &tp->rx_std[src_idx];
2346                 src_map = &tp->rx_std_buffers[src_idx];
2347                 break;
2348
2349         case RXD_OPAQUE_RING_JUMBO:
2350                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2351                 dest_desc = &tp->rx_jumbo[dest_idx];
2352                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2353                 src_desc = &tp->rx_jumbo[src_idx];
2354                 src_map = &tp->rx_jumbo_buffers[src_idx];
2355                 break;
2356
2357         default:
2358                 return;
2359         };
2360
2361         dest_map->skb = src_map->skb;
2362         pci_unmap_addr_set(dest_map, mapping,
2363                            pci_unmap_addr(src_map, mapping));
2364         dest_desc->addr_hi = src_desc->addr_hi;
2365         dest_desc->addr_lo = src_desc->addr_lo;
2366
2367         src_map->skb = NULL;
2368 }
2369
2370 #if TG3_VLAN_TAG_USED
2371 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2372 {
2373         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2374 }
2375 #endif
2376
2377 /* The RX ring scheme is composed of multiple rings which post fresh
2378  * buffers to the chip, and one special ring the chip uses to report
2379  * status back to the host.
2380  *
2381  * The special ring reports the status of received packets to the
2382  * host.  The chip does not write into the original descriptor the
2383  * RX buffer was obtained from.  The chip simply takes the original
2384  * descriptor as provided by the host, updates the status and length
2385  * field, then writes this into the next status ring entry.
2386  *
2387  * Each ring the host uses to post buffers to the chip is described
2388  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2389  * it is first placed into the on-chip ram.  When the packet's length
2390  * is known, it walks down the TG3_BDINFO entries to select the ring.
2391  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2392  * whose MAXLEN can accommodate the new packet's length is chosen.
2393  *
2394  * The "separate ring for rx status" scheme may sound queer, but it makes
2395  * sense from a cache coherency perspective.  If only the host writes
2396  * to the buffer post rings, and only the chip writes to the rx status
2397  * rings, then cache lines never move beyond shared-modified state.
2398  * If both the host and chip were to write into the same ring, cache line
2399  * eviction could occur since both entities want it in an exclusive state.
2400  */
2401 static int tg3_rx(struct tg3 *tp, int budget)
2402 {
2403         u32 work_mask;
2404         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2405         u16 hw_idx, sw_idx;
2406         int received;
2407
2408         hw_idx = tp->hw_status->idx[0].rx_producer;
2409         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2410         work_mask = 0;
2411         received = 0;
2412         while (sw_idx != hw_idx && budget > 0) {
2413                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2414                 unsigned int len;
2415                 struct sk_buff *skb;
2416                 dma_addr_t dma_addr;
2417                 u32 opaque_key, desc_idx, *post_ptr;
2418
2419                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2420                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2421                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2422                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2423                                                   mapping);
2424                         skb = tp->rx_std_buffers[desc_idx].skb;
2425                         post_ptr = &tp->rx_std_ptr;
2426                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2427                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2428                                                   mapping);
2429                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2430                         post_ptr = &tp->rx_jumbo_ptr;
2431                 }
2432                 else {
2433                         goto next_pkt_nopost;
2434                 }
2435
2436                 work_mask |= opaque_key;
2437
2438                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2439                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2440                 drop_it:
2441                         tg3_recycle_rx(tp, opaque_key,
2442                                        desc_idx, *post_ptr);
2443                 drop_it_no_recycle:
2444                         /* The other statistics are kept track of by the card. */
2445                         tp->net_stats.rx_dropped++;
2446                         goto next_pkt;
2447                 }
2448
2449                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2450
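                /* Copy-break: large packets hand the ring buffer straight up
                 * the stack and get a freshly allocated replacement, while
                 * small packets are copied into a new skb so the original
                 * ring buffer can be recycled in place.
                 */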
2451                 if (len > RX_COPY_THRESHOLD) {
2452                         int skb_size;
2453
2454                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2455                                                     desc_idx, *post_ptr);
2456                         if (skb_size < 0)
2457                                 goto drop_it;
2458
2459                         pci_unmap_single(tp->pdev, dma_addr,
2460                                          skb_size - tp->rx_offset,
2461                                          PCI_DMA_FROMDEVICE);
2462
2463                         skb_put(skb, len);
2464                 } else {
2465                         struct sk_buff *copy_skb;
2466
2467                         tg3_recycle_rx(tp, opaque_key,
2468                                        desc_idx, *post_ptr);
2469
2470                         copy_skb = dev_alloc_skb(len + 2);
2471                         if (copy_skb == NULL)
2472                                 goto drop_it_no_recycle;
2473
2474                         copy_skb->dev = tp->dev;
2475                         skb_reserve(copy_skb, 2);
2476                         skb_put(copy_skb, len);
2477                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2478                         memcpy(copy_skb->data, skb->data, len);
2479                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2480
2481                         /* We'll reuse the original ring buffer. */
2482                         skb = copy_skb;
2483                 }
2484
2485                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2486                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2487                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2488                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2489                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2490                 else
2491                         skb->ip_summed = CHECKSUM_NONE;
2492
2493                 skb->protocol = eth_type_trans(skb, tp->dev);
2494 #if TG3_VLAN_TAG_USED
2495                 if (tp->vlgrp != NULL &&
2496                     desc->type_flags & RXD_FLAG_VLAN) {
2497                         tg3_vlan_rx(tp, skb,
2498                                     desc->err_vlan & RXD_VLAN_MASK);
2499                 } else
2500 #endif
2501                         netif_receive_skb(skb);
2502
2503                 tp->dev->last_rx = jiffies;
2504                 received++;
2505                 budget--;
2506
2507 next_pkt:
2508                 (*post_ptr)++;
2509 next_pkt_nopost:
2510                 rx_rcb_ptr++;
2511                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2512         }
2513
2514         /* ACK the status ring. */
2515         tp->rx_rcb_ptr = rx_rcb_ptr;
2516         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2517                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2518
2519         /* Refill RX ring(s). */
2520         if (work_mask & RXD_OPAQUE_RING_STD) {
2521                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2522                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2523                              sw_idx);
2524         }
2525         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2526                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2527                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2528                              sw_idx);
2529         }
2530
2531         return received;
2532 }
2533
2534 static int tg3_poll(struct net_device *netdev, int *budget)
2535 {
2536         struct tg3 *tp = netdev_priv(netdev);
2537         struct tg3_hw_status *sblk = tp->hw_status;
2538         unsigned long flags;
2539         int done;
2540
2541         spin_lock_irqsave(&tp->lock, flags);
2542
2543         /* handle link change and other phy events */
2544         if (!(tp->tg3_flags &
2545               (TG3_FLAG_USE_LINKCHG_REG |
2546                TG3_FLAG_POLL_SERDES))) {
2547                 if (sblk->status & SD_STATUS_LINK_CHG) {
2548                         sblk->status = SD_STATUS_UPDATED |
2549                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2550                         tg3_setup_phy(tp, 0);
2551                 }
2552         }
2553
2554         /* run TX completion thread */
2555         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2556                 spin_lock(&tp->tx_lock);
2557                 tg3_tx(tp);
2558                 spin_unlock(&tp->tx_lock);
2559         }
2560
2561         spin_unlock_irqrestore(&tp->lock, flags);
2562
2563         /* run RX thread, within the bounds set by NAPI.
2564          * All RX "locking" is done by ensuring outside
2565          * code synchronizes with dev->poll()
2566          */
2567         done = 1;
2568         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2569                 int orig_budget = *budget;
2570                 int work_done;
2571
2572                 if (orig_budget > netdev->quota)
2573                         orig_budget = netdev->quota;
2574
2575                 work_done = tg3_rx(tp, orig_budget);
2576
2577                 *budget -= work_done;
2578                 netdev->quota -= work_done;
2579
2580                 if (work_done >= orig_budget)
2581                         done = 0;
2582         }
2583
2584         /* if no more work, tell net stack and NIC we're done */
2585         if (done) {
2586                 spin_lock_irqsave(&tp->lock, flags);
2587                 __netif_rx_complete(netdev);
2588                 tg3_enable_ints(tp);
2589                 spin_unlock_irqrestore(&tp->lock, flags);
2590         }
2591
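        /* Under the 2.6 NAPI contract, returning 0 means we completed our
         * work and re-enabled interrupts; returning 1 asks to be polled
         * again.
         */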
2592         return (done ? 0 : 1);
2593 }
2594
2595 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2596 {
2597         struct tg3_hw_status *sblk = tp->hw_status;
2598         unsigned int work_exists = 0;
2599
2600         /* check for phy events */
2601         if (!(tp->tg3_flags &
2602               (TG3_FLAG_USE_LINKCHG_REG |
2603                TG3_FLAG_POLL_SERDES))) {
2604                 if (sblk->status & SD_STATUS_LINK_CHG)
2605                         work_exists = 1;
2606         }
2607         /* check for RX/TX work to do */
2608         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2609             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2610                 work_exists = 1;
2611
2612         return work_exists;
2613 }
2614
2615 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2616 {
2617         struct net_device *dev = dev_id;
2618         struct tg3 *tp = netdev_priv(dev);
2619         struct tg3_hw_status *sblk = tp->hw_status;
2620         unsigned long flags;
2621         unsigned int handled = 1;
2622
2623         spin_lock_irqsave(&tp->lock, flags);
2624
2625         if (sblk->status & SD_STATUS_UPDATED) {
2626                 /*
2627                  * writing any value to intr-mbox-0 clears PCI INTA# and
2628                  * chip-internal interrupt pending events.
2629                  * writing non-zero to intr-mbox-0 additionally tells the
2630                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2631                  * event coalescing.
2632                  */
2633                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2634                              0x00000001);
2635                 /*
2636                  * Flush PCI write.  This also guarantees that our
2637                  * status block has been flushed to host memory.
2638                  */
2639                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2640                 sblk->status &= ~SD_STATUS_UPDATED;
2641
2642                 if (likely(tg3_has_work(dev, tp)))
2643                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2644                 else {
2645                         /* no work, shared interrupt perhaps?  re-enable
2646                          * interrupts, and flush that PCI write
2647                          */
2648                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2649                                 0x00000000);
2650                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2651                 }
2652         } else {        /* shared interrupt */
2653                 handled = 0;
2654         }
2655
2656         spin_unlock_irqrestore(&tp->lock, flags);
2657
2658         return IRQ_RETVAL(handled);
2659 }
2660
2661 static int tg3_init_hw(struct tg3 *);
2662 static int tg3_halt(struct tg3 *);
2663
2664 #ifdef CONFIG_NET_POLL_CONTROLLER
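/* Entry point for the netpoll infrastructure (e.g. netconsole): service the
 * device by calling the normal interrupt handler directly, since interrupts
 * may not be usable in that context.
 */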
2665 static void tg3_poll_controller(struct net_device *dev)
2666 {
2667         tg3_interrupt(dev->irq, dev, NULL);
2668 }
2669 #endif
2670
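/* Runs in process context from the shared workqueue: quiesce the interface,
 * fully reset and reinitialize the hardware, then let traffic flow again.
 */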
2671 static void tg3_reset_task(void *_data)
2672 {
2673         struct tg3 *tp = _data;
2674         unsigned int restart_timer;
2675
2676         tg3_netif_stop(tp);
2677
2678         spin_lock_irq(&tp->lock);
2679         spin_lock(&tp->tx_lock);
2680
2681         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2682         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2683
2684         tg3_halt(tp);
2685         tg3_init_hw(tp);
2686
2687         spin_unlock(&tp->tx_lock);
2688         spin_unlock_irq(&tp->lock);
2689
2690         tg3_netif_start(tp);
2691
2692         if (restart_timer)
2693                 mod_timer(&tp->timer, jiffies + 1);
2694 }
2695
2696 static void tg3_tx_timeout(struct net_device *dev)
2697 {
2698         struct tg3 *tp = netdev_priv(dev);
2699
2700         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2701                dev->name);
2702
2703         schedule_work(&tp->reset_task);
2704 }
2705
2706 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2707
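/* Work around the 4GB-boundary DMA bug on transmit: copy the offending skb
 * into a new linear skb, point the descriptor at the new mapping, and then
 * unmap and clear the ring entries that referenced the original skb.
 */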
2708 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2709                                        u32 guilty_entry, int guilty_len,
2710                                        u32 last_plus_one, u32 *start, u32 mss)
2711 {
2712         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2713         dma_addr_t new_addr;
2714         u32 entry = *start;
2715         int i;
2716
2717         if (!new_skb) {
2718                 dev_kfree_skb(skb);
2719                 return -1;
2720         }
2721
2722         /* New SKB is guaranteed to be linear. */
2723         entry = *start;
2724         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2725                                   PCI_DMA_TODEVICE);
2726         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2727                     (skb->ip_summed == CHECKSUM_HW) ?
2728                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2729         *start = NEXT_TX(entry);
2730
2731         /* Now clean up the sw ring entries. */
2732         i = 0;
2733         while (entry != last_plus_one) {
2734                 int len;
2735
2736                 if (i == 0)
2737                         len = skb_headlen(skb);
2738                 else
2739                         len = skb_shinfo(skb)->frags[i-1].size;
2740                 pci_unmap_single(tp->pdev,
2741                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2742                                  len, PCI_DMA_TODEVICE);
2743                 if (i == 0) {
2744                         tp->tx_buffers[entry].skb = new_skb;
2745                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2746                 } else {
2747                         tp->tx_buffers[entry].skb = NULL;
2748                 }
2749                 entry = NEXT_TX(entry);
2750         }
2751
2752         dev_kfree_skb(skb);
2753
2754         return 0;
2755 }
2756
2757 static void tg3_set_txd(struct tg3 *tp, int entry,
2758                         dma_addr_t mapping, int len, u32 flags,
2759                         u32 mss_and_is_end)
2760 {
2761         int is_end = (mss_and_is_end & 0x1);
2762         u32 mss = (mss_and_is_end >> 1);
2763         u32 vlan_tag = 0;
2764
2765         if (is_end)
2766                 flags |= TXD_FLAG_END;
2767         if (flags & TXD_FLAG_VLAN) {
2768                 vlan_tag = flags >> 16;
2769                 flags &= 0xffff;
2770         }
2771         vlan_tag |= (mss << TXD_MSS_SHIFT);
2772         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2773                 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2774
2775                 txd->addr_hi = ((u64) mapping >> 32);
2776                 txd->addr_lo = ((u64) mapping & 0xffffffff);
2777                 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2778                 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2779         } else {
2780                 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2781                 unsigned long txd;
2782
2783                 txd = (tp->regs +
2784                        NIC_SRAM_WIN_BASE +
2785                        NIC_SRAM_TX_BUFFER_DESC);
2786                 txd += (entry * TXD_SIZE);
2787
2788                 /* Save some PIOs */
2789                 if (sizeof(dma_addr_t) != sizeof(u32))
2790                         writel(((u64) mapping >> 32),
2791                                txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2792
2793                 writel(((u64) mapping & 0xffffffff),
2794                        txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2795                 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2796                 if (txr->prev_vlan_tag != vlan_tag) {
2797                         writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2798                         txr->prev_vlan_tag = vlan_tag;
2799                 }
2800         }
2801 }
2802
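/* Returns true when a mapping in the low 4GB lands so close to the 4GB
 * boundary that the 32-bit base address plus the buffer length (plus a
 * small guard) wraps around, which the affected chips cannot DMA across.
 */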
2803 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2804 {
2805         u32 base = (u32) mapping & 0xffffffff;
2806
2807         return ((base > 0xffffdcc0) &&
2808                 ((u64) mapping >> 32) == 0 &&
2809                 (base + len + 8 < base));
2810 }
2811
2812 static int tg3_start_xmit_4gbug(struct sk_buff *skb, struct net_device *dev)
2813 {
2814         struct tg3 *tp = netdev_priv(dev);
2815         dma_addr_t mapping;
2816         unsigned int i;
2817         u32 len, entry, base_flags, mss;
2818         int would_hit_hwbug;
2819         unsigned long flags;
2820
2821         len = skb_headlen(skb);
2822
2823         /* No BH disabling for tx_lock here.  We are running in BH disabled
2824          * context and TX reclaim runs via tp->poll inside of a software
2825          * interrupt.  Rejoice!
2826          *
2827          * Actually, things are not so simple.  If we are to take a hw
2828          * IRQ here, we can deadlock, consider:
2829          *
2830          *       CPU1           CPU2
2831          *   tg3_start_xmit
2832          *   take tp->tx_lock
2833          *                      tg3_timer
2834          *                      take tp->lock
2835          *   tg3_interrupt
2836          *   spin on tp->lock
2837          *                      spin on tp->tx_lock
2838          *
2839          * So we really do need to disable interrupts when taking
2840          * tx_lock here.
2841          */
2842         spin_lock_irqsave(&tp->tx_lock, flags);
2843
2844         /* This is a hard error, log it. */
2845         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2846                 netif_stop_queue(dev);
2847                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2848                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2849                        dev->name);
2850                 return 1;
2851         }
2852
2853         entry = tp->tx_prod;
2854         base_flags = 0;
2855         if (skb->ip_summed == CHECKSUM_HW)
2856                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2857 #if TG3_TSO_SUPPORT != 0
2858         mss = 0;
2859         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2860             (mss = skb_shinfo(skb)->tso_size) != 0) {
2861                 int tcp_opt_len, ip_tcp_len;
2862
2863                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2864                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2865
2866                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2867                                TXD_FLAG_CPU_POST_DMA);
2868
2869                 skb->nh.iph->check = 0;
2870                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2871                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2872                                                       skb->nh.iph->daddr,
2873                                                       0, IPPROTO_TCP, 0);
2874
2875                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2876                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2877                                 int tsflags;
2878
2879                                 tsflags = ((skb->nh.iph->ihl - 5) +
2880                                            (tcp_opt_len >> 2));
2881                                 mss |= (tsflags << 11);
2882                         }
2883                 } else {
2884                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2885                                 int tsflags;
2886
2887                                 tsflags = ((skb->nh.iph->ihl - 5) +
2888                                            (tcp_opt_len >> 2));
2889                                 base_flags |= tsflags << 12;
2890                         }
2891                 }
2892         }
2893 #else
2894         mss = 0;
2895 #endif
2896 #if TG3_VLAN_TAG_USED
2897         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2898                 base_flags |= (TXD_FLAG_VLAN |
2899                                (vlan_tx_tag_get(skb) << 16));
2900 #endif
2901
2902         /* Queue skb data, a.k.a. the main skb fragment. */
2903         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2904
2905         tp->tx_buffers[entry].skb = skb;
2906         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2907
2908         would_hit_hwbug = 0;
2909
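        /* Record the offending descriptor index as entry + 1 so that a
         * value of zero can still mean "no 4GB boundary hit".
         */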
2910         if (tg3_4g_overflow_test(mapping, len))
2911                 would_hit_hwbug = entry + 1;
2912
2913         tg3_set_txd(tp, entry, mapping, len, base_flags,
2914                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2915
2916         entry = NEXT_TX(entry);
2917
2918         /* Now loop through additional data fragments, and queue them. */
2919         if (skb_shinfo(skb)->nr_frags > 0) {
2920                 unsigned int i, last;
2921
2922                 last = skb_shinfo(skb)->nr_frags - 1;
2923                 for (i = 0; i <= last; i++) {
2924                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2925
2926                         len = frag->size;
2927                         mapping = pci_map_page(tp->pdev,
2928                                                frag->page,
2929                                                frag->page_offset,
2930                                                len, PCI_DMA_TODEVICE);
2931
2932                         tp->tx_buffers[entry].skb = NULL;
2933                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2934
2935                         if (tg3_4g_overflow_test(mapping, len)) {
2936                                 /* Only one should match. */
2937                                 if (would_hit_hwbug)
2938                                         BUG();
2939                                 would_hit_hwbug = entry + 1;
2940                         }
2941
2942                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
2943                                 tg3_set_txd(tp, entry, mapping, len,
2944                                             base_flags, (i == last)|(mss << 1));
2945                         else
2946                                 tg3_set_txd(tp, entry, mapping, len,
2947                                             base_flags, (i == last));
2948
2949                         entry = NEXT_TX(entry);
2950                 }
2951         }
2952
2953         if (would_hit_hwbug) {
2954                 u32 last_plus_one = entry;
2955                 u32 start;
2956                 unsigned int len = 0;
2957
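                /* Rewind to this packet's first descriptor, then walk
                 * forward to locate the guilty entry and the length mapped
                 * there before handing the whole chain to the workaround.
                 */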
2958                 would_hit_hwbug -= 1;
2959                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
2960                 entry &= (TG3_TX_RING_SIZE - 1);
2961                 start = entry;
2962                 i = 0;
2963                 while (entry != last_plus_one) {
2964                         if (i == 0)
2965                                 len = skb_headlen(skb);
2966                         else
2967                                 len = skb_shinfo(skb)->frags[i-1].size;
2968
2969                         if (entry == would_hit_hwbug)
2970                                 break;
2971
2972                         i++;
2973                         entry = NEXT_TX(entry);
2974
2975                 }
2976
2977                 /* If the workaround fails due to memory/mapping
2978                  * failure, silently drop this packet.
2979                  */
2980                 if (tigon3_4gb_hwbug_workaround(tp, skb,
2981                                                 entry, len,
2982                                                 last_plus_one,
2983                                                 &start, mss))
2984                         goto out_unlock;
2985
2986                 entry = start;
2987         }
2988
2989         /* Packets are ready, update Tx producer idx local and on card. */
2990         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2991                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2992                               TG3_64BIT_REG_LOW), entry);
2993         } else {
2994                 /* First, make sure tg3 sees last descriptor fully
2995                  * in SRAM.
2996                  */
2997                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2998                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
2999
3000                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3001                               TG3_64BIT_REG_LOW), entry);
3002         }
3003
3004         tp->tx_prod = entry;
3005         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3006                 netif_stop_queue(dev);
3007
3008 out_unlock:
3009         spin_unlock_irqrestore(&tp->tx_lock, flags);
3010
3011         dev->trans_start = jiffies;
3012
3013         return 0;
3014 }
3015
3016 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3017 {
3018         struct tg3 *tp = netdev_priv(dev);
3019         dma_addr_t mapping;
3020         u32 len, entry, base_flags, mss;
3021         unsigned long flags;
3022
3023         len = skb_headlen(skb);
3024
3025         /* No BH disabling for tx_lock here.  We are running in BH disabled
3026          * context and TX reclaim runs via tp->poll inside of a software
3027          * interrupt.  Rejoice!
3028          *
3029          * Actually, things are not so simple.  If we are to take a hw
3030          * IRQ here, we can deadlock, consider:
3031          *
3032          *       CPU1           CPU2
3033          *   tg3_start_xmit
3034          *   take tp->tx_lock
3035          *                      tg3_timer
3036          *                      take tp->lock
3037          *   tg3_interrupt
3038          *   spin on tp->lock
3039          *                      spin on tp->tx_lock
3040          *
3041          * So we really do need to disable interrupts when taking
3042          * tx_lock here.
3043          */
3044         spin_lock_irqsave(&tp->tx_lock, flags);
3045
3046         /* This is a hard error, log it. */
3047         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3048                 netif_stop_queue(dev);
3049                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3050                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3051                        dev->name);
3052                 return 1;
3053         }
3054
3055         entry = tp->tx_prod;
3056         base_flags = 0;
3057         if (skb->ip_summed == CHECKSUM_HW)
3058                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3059 #if TG3_TSO_SUPPORT != 0
3060         mss = 0;
3061         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3062             (mss = skb_shinfo(skb)->tso_size) != 0) {
3063                 int tcp_opt_len, ip_tcp_len;
3064
3065                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3066                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3067
3068                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3069                                TXD_FLAG_CPU_POST_DMA);
3070
3071                 skb->nh.iph->check = 0;
3072                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3073                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
3074                                                       skb->nh.iph->daddr,
3075                                                       0, IPPROTO_TCP, 0);
3076
3077                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3078                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3079                                 int tsflags;
3080
3081                                 tsflags = ((skb->nh.iph->ihl - 5) +
3082                                            (tcp_opt_len >> 2));
3083                                 mss |= (tsflags << 11);
3084                         }
3085                 } else {
3086                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3087                                 int tsflags;
3088
3089                                 tsflags = ((skb->nh.iph->ihl - 5) +
3090                                            (tcp_opt_len >> 2));
3091                                 base_flags |= tsflags << 12;
3092                         }
3093                 }
3094         }
3095 #else
3096         mss = 0;
3097 #endif
3098 #if TG3_VLAN_TAG_USED
3099         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3100                 base_flags |= (TXD_FLAG_VLAN |
3101                                (vlan_tx_tag_get(skb) << 16));
3102 #endif
3103
3104         /* Queue skb data, a.k.a. the main skb fragment. */
3105         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3106
3107         tp->tx_buffers[entry].skb = skb;
3108         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3109
3110         tg3_set_txd(tp, entry, mapping, len, base_flags,
3111                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3112
3113         entry = NEXT_TX(entry);
3114
3115         /* Now loop through additional data fragments, and queue them. */
3116         if (skb_shinfo(skb)->nr_frags > 0) {
3117                 unsigned int i, last;
3118
3119                 last = skb_shinfo(skb)->nr_frags - 1;
3120                 for (i = 0; i <= last; i++) {
3121                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3122
3123
3124                         len = frag->size;
3125                         mapping = pci_map_page(tp->pdev,
3126                                                frag->page,
3127                                                frag->page_offset,
3128                                                len, PCI_DMA_TODEVICE);
3129
3130                         tp->tx_buffers[entry].skb = NULL;
3131                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3132
3133                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3134                                 tg3_set_txd(tp, entry, mapping, len,
3135                                             base_flags, (i == last)|(mss << 1));
3136                         else
3137                                 tg3_set_txd(tp, entry, mapping, len,
3138                                             base_flags, (i == last));
3139
3140                         entry = NEXT_TX(entry);
3141                 }
3142         }
3143
3144         /* Packets are ready, update Tx producer idx local and on card.
3145          * We know this is not a 5700 (by virtue of not being a chip
3146          * requiring the 4GB overflow workaround) so we can safely omit
3147          * the double-write bug tests.
3148          */
3149         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3150                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
3151                               TG3_64BIT_REG_LOW), entry);
3152         } else {
3153                 /* First, make sure tg3 sees last descriptor fully
3154                  * in SRAM.
3155                  */
3156                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3157                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
3158                              TG3_64BIT_REG_LOW);
3159
3160                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3161                               TG3_64BIT_REG_LOW), entry);
3162         }
3163
3164         tp->tx_prod = entry;
3165         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3166                 netif_stop_queue(dev);
3167
3168         spin_unlock_irqrestore(&tp->tx_lock, flags);
3169
3170         dev->trans_start = jiffies;
3171
3172         return 0;
3173 }
3174
3175 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3176                                int new_mtu)
3177 {
3178         dev->mtu = new_mtu;
3179
3180         if (new_mtu > ETH_DATA_LEN)
3181                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3182         else
3183                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3184 }
3185
3186 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3187 {
3188         struct tg3 *tp = netdev_priv(dev);
3189
3190         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3191                 return -EINVAL;
3192
3193         if (!netif_running(dev)) {
3194                 /* We'll just catch it later when the
3195                  * device is up'd.
3196                  */
3197                 tg3_set_mtu(dev, tp, new_mtu);
3198                 return 0;
3199         }
3200
3201         tg3_netif_stop(tp);
3202         spin_lock_irq(&tp->lock);
3203         spin_lock(&tp->tx_lock);
3204
3205         tg3_halt(tp);
3206
3207         tg3_set_mtu(dev, tp, new_mtu);
3208
3209         tg3_init_hw(tp);
3210
3211         spin_unlock(&tp->tx_lock);
3212         spin_unlock_irq(&tp->lock);
3213         tg3_netif_start(tp);
3214
3215         return 0;
3216 }
3217
3218 /* Free up pending packets in all rx/tx rings.
3219  *
3220  * The chip has been shut down and the driver detached from
3221  * the networking stack, so no interrupts or new tx packets will
3222  * end up in the driver.  tp->{tx,}lock is not held and we are not
3223  * in an interrupt context and thus may sleep.
3224  */
3225 static void tg3_free_rings(struct tg3 *tp)
3226 {
3227         struct ring_info *rxp;
3228         int i;
3229
3230         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3231                 rxp = &tp->rx_std_buffers[i];
3232
3233                 if (rxp->skb == NULL)
3234                         continue;
3235                 pci_unmap_single(tp->pdev,
3236                                  pci_unmap_addr(rxp, mapping),
3237                                  RX_PKT_BUF_SZ - tp->rx_offset,
3238                                  PCI_DMA_FROMDEVICE);
3239                 dev_kfree_skb_any(rxp->skb);
3240                 rxp->skb = NULL;
3241         }
3242
3243         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3244                 rxp = &tp->rx_jumbo_buffers[i];
3245
3246                 if (rxp->skb == NULL)
3247                         continue;
3248                 pci_unmap_single(tp->pdev,
3249                                  pci_unmap_addr(rxp, mapping),
3250                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3251                                  PCI_DMA_FROMDEVICE);
3252                 dev_kfree_skb_any(rxp->skb);
3253                 rxp->skb = NULL;
3254         }
3255
3256         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3257                 struct tx_ring_info *txp;
3258                 struct sk_buff *skb;
3259                 int j;
3260
3261                 txp = &tp->tx_buffers[i];
3262                 skb = txp->skb;
3263
3264                 if (skb == NULL) {
3265                         i++;
3266                         continue;
3267                 }
3268
3269                 pci_unmap_single(tp->pdev,
3270                                  pci_unmap_addr(txp, mapping),
3271                                  skb_headlen(skb),
3272                                  PCI_DMA_TODEVICE);
3273                 txp->skb = NULL;
3274
3275                 i++;
3276
3277                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3278                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3279                         pci_unmap_page(tp->pdev,
3280                                        pci_unmap_addr(txp, mapping),
3281                                        skb_shinfo(skb)->frags[j].size,
3282                                        PCI_DMA_TODEVICE);
3283                         i++;
3284                 }
3285
3286                 dev_kfree_skb_any(skb);
3287         }
3288 }
3289
3290 /* Initialize tx/rx rings for packet processing.
3291  *
3292  * The chip has been shut down and the driver detached from
3293  * the networking, so no interrupts or new tx packets will
3294  * end up in the driver.  tp->{tx,}lock are held and thus
3295  * we may not sleep.
3296  */
3297 static void tg3_init_rings(struct tg3 *tp)
3298 {
3299         unsigned long start, end;
3300         u32 i;
3301
3302         /* Free up all the SKBs. */
3303         tg3_free_rings(tp);
3304
3305         /* Zero out all descriptors. */
3306         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3307         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3308         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3309
3310         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3311                 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3312         } else {
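                /* TX descriptors live in NIC SRAM: clear them with PIO
                 * writes and forget any cached VLAN tag so that the next
                 * tg3_set_txd() call rewrites it.
                 */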
3313                 start = (tp->regs +
3314                          NIC_SRAM_WIN_BASE +
3315                          NIC_SRAM_TX_BUFFER_DESC);
3316                 end = start + TG3_TX_RING_BYTES;
3317                 while (start < end) {
3318                         writel(0, start);
3319                         start += 4;
3320                 }
3321                 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3322                         tp->tx_buffers[i].prev_vlan_tag = 0;
3323         }
3324
3325         /* Initialize invariants of the rings; we only set this
3326          * stuff once.  This works because the card does not
3327          * write into the rx buffer posting rings.
3328          */
3329         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3330                 struct tg3_rx_buffer_desc *rxd;
3331
3332                 rxd = &tp->rx_std[i];
3333                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3334                         << RXD_LEN_SHIFT;
3335                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3336                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3337                                (i << RXD_OPAQUE_INDEX_SHIFT));
3338         }
3339
3340         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3341                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3342                         struct tg3_rx_buffer_desc *rxd;
3343
3344                         rxd = &tp->rx_jumbo[i];
3345                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3346                                 << RXD_LEN_SHIFT;
3347                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3348                                 RXD_FLAG_JUMBO;
3349                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3350                                (i << RXD_OPAQUE_INDEX_SHIFT));
3351                 }
3352         }
3353
3354         /* Now allocate fresh SKBs for each rx ring. */
3355         for (i = 0; i < tp->rx_pending; i++) {
3356                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3357                                      -1, i) < 0)
3358                         break;
3359         }
3360
3361         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3362                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3363                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3364                                              -1, i) < 0)
3365                                 break;
3366                 }
3367         }
3368 }
3369
3370 /*
3371  * Must not be invoked with interrupt sources disabled and
3372  * the hardware shut down.
3373  */
3374 static void tg3_free_consistent(struct tg3 *tp)
3375 {
3376         if (tp->rx_std_buffers) {
3377                 kfree(tp->rx_std_buffers);
3378                 tp->rx_std_buffers = NULL;
3379         }
3380         if (tp->rx_std) {
3381                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3382                                     tp->rx_std, tp->rx_std_mapping);
3383                 tp->rx_std = NULL;
3384         }
3385         if (tp->rx_jumbo) {
3386                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3387                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3388                 tp->rx_jumbo = NULL;
3389         }
3390         if (tp->rx_rcb) {
3391                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3392                                     tp->rx_rcb, tp->rx_rcb_mapping);
3393                 tp->rx_rcb = NULL;
3394         }
3395         if (tp->tx_ring) {
3396                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3397                         tp->tx_ring, tp->tx_desc_mapping);
3398                 tp->tx_ring = NULL;
3399         }
3400         if (tp->hw_status) {
3401                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3402                                     tp->hw_status, tp->status_mapping);
3403                 tp->hw_status = NULL;
3404         }
3405         if (tp->hw_stats) {
3406                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3407                                     tp->hw_stats, tp->stats_mapping);
3408                 tp->hw_stats = NULL;
3409         }
3410 }
3411
3412 /*
3413  * Must not be invoked with interrupt sources disabled and
3414  * the hardware shut down.  Can sleep.
3415  */
3416 static int tg3_alloc_consistent(struct tg3 *tp)
3417 {
3418         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3419                                       (TG3_RX_RING_SIZE +
3420                                        TG3_RX_JUMBO_RING_SIZE)) +
3421                                      (sizeof(struct tx_ring_info) *
3422                                       TG3_TX_RING_SIZE),
3423                                      GFP_KERNEL);
3424         if (!tp->rx_std_buffers)
3425                 return -ENOMEM;
3426
3427         memset(tp->rx_std_buffers, 0,
3428                (sizeof(struct ring_info) *
3429                 (TG3_RX_RING_SIZE +
3430                  TG3_RX_JUMBO_RING_SIZE)) +
3431                (sizeof(struct tx_ring_info) *
3432                 TG3_TX_RING_SIZE));
3433
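        /* The jumbo rx and tx ring_info arrays live in the same kmalloc
         * block as the standard rx entries above; carve out pointers to
         * their sub-regions.
         */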
3434         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3435         tp->tx_buffers = (struct tx_ring_info *)
3436                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3437
3438         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3439                                           &tp->rx_std_mapping);
3440         if (!tp->rx_std)
3441                 goto err_out;
3442
3443         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3444                                             &tp->rx_jumbo_mapping);
3445
3446         if (!tp->rx_jumbo)
3447                 goto err_out;
3448
3449         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3450                                           &tp->rx_rcb_mapping);
3451         if (!tp->rx_rcb)
3452                 goto err_out;
3453
3454         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3455                 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3456                                                    &tp->tx_desc_mapping);
3457                 if (!tp->tx_ring)
3458                         goto err_out;
3459         } else {
3460                 tp->tx_ring = NULL;
3461                 tp->tx_desc_mapping = 0;
3462         }
3463
3464         tp->hw_status = pci_alloc_consistent(tp->pdev,
3465                                              TG3_HW_STATUS_SIZE,
3466                                              &tp->status_mapping);
3467         if (!tp->hw_status)
3468                 goto err_out;
3469
3470         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3471                                             sizeof(struct tg3_hw_stats),
3472                                             &tp->stats_mapping);
3473         if (!tp->hw_stats)
3474                 goto err_out;
3475
3476         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3477         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3478
3479         return 0;
3480
3481 err_out:
3482         tg3_free_consistent(tp);
3483         return -ENOMEM;
3484 }
3485
3486 #define MAX_WAIT_CNT 1000
3487
3488 /* To stop a block, clear the enable bit and poll till it
3489  * clears.  tp->lock is held.
3490  */
3491 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3492 {
3493         unsigned int i;
3494         u32 val;
3495
3496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
3498                 switch (ofs) {
3499                 case RCVLSC_MODE:
3500                 case DMAC_MODE:
3501                 case MBFREE_MODE:
3502                 case BUFMGR_MODE:
3503                 case MEMARB_MODE:
3504                         /* We can't enable/disable these bits of the
3505                          * 5705/5750, just say success.
3506                          */
3507                         return 0;
3508
3509                 default:
3510                         break;
3511                 }
3512         }
3513
3514         val = tr32(ofs);
3515         val &= ~enable_bit;
3516         tw32_f(ofs, val);
3517
3518         for (i = 0; i < MAX_WAIT_CNT; i++) {
3519                 udelay(100);
3520                 val = tr32(ofs);
3521                 if ((val & enable_bit) == 0)
3522                         break;
3523         }
3524
3525         if (i == MAX_WAIT_CNT) {
3526                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3527                        "ofs=%lx enable_bit=%x\n",
3528                        ofs, enable_bit);
3529                 return -ENODEV;
3530         }
3531
3532         return 0;
3533 }
3534
3535 /* tp->lock is held. */
3536 static int tg3_abort_hw(struct tg3 *tp)
3537 {
3538         int i, err;
3539
3540         tg3_disable_ints(tp);
3541
3542         tp->rx_mode &= ~RX_MODE_ENABLE;
3543         tw32_f(MAC_RX_MODE, tp->rx_mode);
3544         udelay(10);
3545
3546         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3547         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3548         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3549         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3550         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3551         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3552
3553         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3554         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3555         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3556         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3557         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3558         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3559         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3560         if (err)
3561                 goto out;
3562
3563         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3564         tw32_f(MAC_MODE, tp->mac_mode);
3565         udelay(40);
3566
3567         tp->tx_mode &= ~TX_MODE_ENABLE;
3568         tw32_f(MAC_TX_MODE, tp->tx_mode);
3569
3570         for (i = 0; i < MAX_WAIT_CNT; i++) {
3571                 udelay(100);
3572                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3573                         break;
3574         }
3575         if (i >= MAX_WAIT_CNT) {
3576                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3577                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3578                        tp->dev->name, tr32(MAC_TX_MODE));
3579                 return -ENODEV;
3580         }
3581
3582         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3583         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3584         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3585
3586         tw32(FTQ_RESET, 0xffffffff);
3587         tw32(FTQ_RESET, 0x00000000);
3588
3589         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3590         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3591         if (err)
3592                 goto out;
3593
3594         if (tp->hw_status)
3595                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3596         if (tp->hw_stats)
3597                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3598
3599 out:
3600         return err;
3601 }
3602
3603 /* tp->lock is held. */
3604 static int tg3_nvram_lock(struct tg3 *tp)
3605 {
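        /* Request NVRAM software arbitration semaphore 1 and poll for the
         * grant bit for up to ~160 ms (8000 * 20 us) before giving up.
         */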
3606         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3607                 int i;
3608
3609                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3610                 for (i = 0; i < 8000; i++) {
3611                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3612                                 break;
3613                         udelay(20);
3614                 }
3615                 if (i == 8000)
3616                         return -ENODEV;
3617         }
3618         return 0;
3619 }
3620
3621 /* tp->lock is held. */
3622 static void tg3_nvram_unlock(struct tg3 *tp)
3623 {
3624         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3625                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3630 {
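        /* Tell the boot firmware that a driver-initiated reset is coming;
         * tg3_chip_reset() later polls this mailbox for the firmware's
         * ~MAGIC1 completion value.  With the newer ASF handshake, also
         * record why the reset is happening.
         */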
3631         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3632                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3633
3634         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3635                 switch (kind) {
3636                 case RESET_KIND_INIT:
3637                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3638                                       DRV_STATE_START);
3639                         break;
3640
3641                 case RESET_KIND_SHUTDOWN:
3642                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3643                                       DRV_STATE_UNLOAD);
3644                         break;
3645
3646                 case RESET_KIND_SUSPEND:
3647                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3648                                       DRV_STATE_SUSPEND);
3649                         break;
3650
3651                 default:
3652                         break;
3653                 }
3654         }
3655 }
3656
3657 /* tp->lock is held. */
3658 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3659 {
3660         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3661                 switch (kind) {
3662                 case RESET_KIND_INIT:
3663                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3664                                       DRV_STATE_START_DONE);
3665                         break;
3666
3667                 case RESET_KIND_SHUTDOWN:
3668                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3669                                       DRV_STATE_UNLOAD_DONE);
3670                         break;
3671
3672                 default:
3673                         break;
3674                 }
3675         }
3676 }
3677
3678 /* tp->lock is held. */
3679 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3680 {
3681         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3682                 switch (kind) {
3683                 case RESET_KIND_INIT:
3684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3685                                       DRV_STATE_START);
3686                         break;
3687
3688                 case RESET_KIND_SHUTDOWN:
3689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3690                                       DRV_STATE_UNLOAD);
3691                         break;
3692
3693                 case RESET_KIND_SUSPEND:
3694                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3695                                       DRV_STATE_SUSPEND);
3696                         break;
3697
3698                 default:
3699                         break;
3700                 }
3701         }
3702 }
3703
3704 /* tp->lock is held. */
3705 static int tg3_chip_reset(struct tg3 *tp)
3706 {
3707         u32 val;
3708         u32 flags_save;
3709         int i;
3710
3711         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704))
3712                 tg3_nvram_lock(tp);
3713
3714         /*
3715          * We must avoid the readl() that normally takes place.
3716          * It locks machines, causes machine checks, and other
3717          * fun things.  So, temporarily disable the 5701
3718          * hardware workaround while we do the reset.
3719          */
3720         flags_save = tp->tg3_flags;
3721         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3722
3723         /* do the reset */
3724         val = GRC_MISC_CFG_CORECLK_RESET;
3725
3726         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3727                 if (tr32(0x7e2c) == 0x60) {
3728                         tw32(0x7e2c, 0x20);
3729                 }
3730                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3731                         tw32(GRC_MISC_CFG, (1 << 29));
3732                         val |= (1 << 29);
3733                 }
3734         }
3735
3736         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
3737             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3738                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3739         tw32(GRC_MISC_CFG, val);
3740
3741         /* restore 5701 hardware bug workaround flag */
3742         tp->tg3_flags = flags_save;
3743
3744         /* Flush PCI posted writes.  The normal MMIO registers
3745          * are inaccessible at this time so this is the only
3746          * way to do this reliably.  I tried to use indirect
3747          * register read/write but this upset some 5701 variants.
3748          */
3749         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3750
3751         udelay(120);
3752
3753         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3754                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3755                         int i;
3756                         u32 cfg_val;
3757
3758                         /* Wait for link training to complete.  */
3759                         for (i = 0; i < 5000; i++)
3760                                 udelay(100);
3761
3762                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3763                         pci_write_config_dword(tp->pdev, 0xc4,
3764                                                cfg_val | (1 << 15));
3765                 }
3766                 /* Set PCIE max payload size and clear error status.  */
3767                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3768         }
3769
3770         /* Re-enable indirect register accesses. */
3771         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3772                                tp->misc_host_ctrl);
3773
3774         /* Set MAX PCI retry to zero. */
3775         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3776         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3777             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3778                 val |= PCISTATE_RETRY_SAME_DMA;
3779         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3780
3781         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3782
3783         /* Make sure PCI-X relaxed ordering bit is clear. */
3784         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3785         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3786         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3787
3788         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3789
3790         tw32(GRC_MODE, tp->grc_mode);
3791
3792         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3793                 u32 val = tr32(0xc4);
3794
3795                 tw32(0xc4, val | (1 << 15));
3796         }
3797
3798         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3799             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3800                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3801                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3802                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3803                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3804         }
3805
3806         if (tp->phy_id == PHY_ID_SERDES) {
3807                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3808                 tw32_f(MAC_MODE, tp->mac_mode);
3809         } else
3810                 tw32_f(MAC_MODE, 0);
3811         udelay(40);
3812
3813         /* Wait for firmware initialization to complete. */
3814         for (i = 0; i < 100000; i++) {
3815                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3816                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3817                         break;
3818                 udelay(10);
3819         }
3820         if (i >= 100000 &&
3821             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3822                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3823                        "firmware will not restart magic=%08x\n",
3824                        tp->dev->name, val);
3825                 return -ENODEV;
3826         }
3827
3828         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3829             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3830                 u32 val = tr32(0x7c00);
3831
3832                 tw32(0x7c00, val | (1 << 25));
3833         }
3834
3835         /* Reprobe ASF enable state.  */
3836         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3837         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3838         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3839         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3840                 u32 nic_cfg;
3841
3842                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3843                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3844                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3845                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3846                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3847                 }
3848         }
3849
3850         return 0;
3851 }
3852
3853 /* tp->lock is held. */
3854 static void tg3_stop_fw(struct tg3 *tp)
3855 {
3856         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3857                 u32 val;
3858                 int i;
3859
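                /* Post the pause command for the ASF firmware, raise the
                 * RX CPU event bit to signal it, then briefly poll for the
                 * firmware to clear that bit as an acknowledgement.
                 */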
3860                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3861                 val = tr32(GRC_RX_CPU_EVENT);
3862                 val |= (1 << 14);
3863                 tw32(GRC_RX_CPU_EVENT, val);
3864
3865                 /* Wait for RX cpu to ACK the event.  */
3866                 for (i = 0; i < 100; i++) {
3867                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3868                                 break;
3869                         udelay(1);
3870                 }
3871         }
3872 }
3873
3874 /* tp->lock is held. */
3875 static int tg3_halt(struct tg3 *tp)
3876 {
3877         int err;
3878
3879         tg3_stop_fw(tp);
3880
3881         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
3882
3883         tg3_abort_hw(tp);
3884         err = tg3_chip_reset(tp);
3885
3886         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
3887         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3888
3889         if (err)
3890                 return err;
3891
3892         return 0;
3893 }
3894
3895 #define TG3_FW_RELEASE_MAJOR    0x0
3896 #define TG3_FW_RELEASE_MINOR    0x0
3897 #define TG3_FW_RELEASE_FIX      0x0
3898 #define TG3_FW_START_ADDR       0x08000000
3899 #define TG3_FW_TEXT_ADDR        0x08000000
3900 #define TG3_FW_TEXT_LEN         0x9c0
3901 #define TG3_FW_RODATA_ADDR      0x080009c0
3902 #define TG3_FW_RODATA_LEN       0x60
3903 #define TG3_FW_DATA_ADDR        0x08000a40
3904 #define TG3_FW_DATA_LEN         0x20
3905 #define TG3_FW_SBSS_ADDR        0x08000a60
3906 #define TG3_FW_SBSS_LEN         0xc
3907 #define TG3_FW_BSS_ADDR         0x08000a70
3908 #define TG3_FW_BSS_LEN          0x10
3909
3910 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3911         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3912         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3913         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3914         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3915         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3916         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3917         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3918         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3919         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3920         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3921         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3922         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3923         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3924         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3925         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3926         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3927         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3928         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3929         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3930         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3931         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3932         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3933         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3934         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3935         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3936         0, 0, 0, 0, 0, 0,
3937         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3938         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3939         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3940         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3941         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3942         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3943         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3944         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3945         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3946         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3947         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3948         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3949         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3950         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3951         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3952         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3953         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3954         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3955         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3956         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3957         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3958         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3959         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3960         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3961         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3962         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3963         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3964         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3965         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3966         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3967         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3968         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3969         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3970         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3971         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3972         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3973         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3974         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3975         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3976         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3977         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3978         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3979         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3980         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3981         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3982         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3983         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3984         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3985         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3986         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3987         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3988         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3989         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3990         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3991         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3992         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3993         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3994         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3995         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3996         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3997         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3998         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3999         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4000         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4001         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4002 };
4003
4004 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4005         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4006         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4007         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4008         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4009         0x00000000
4010 };
4011
4012 #if 0 /* All zeros, don't eat up space with it. */
4013 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4014         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4015         0x00000000, 0x00000000, 0x00000000, 0x00000000
4016 };
4017 #endif
4018
4019 #define RX_CPU_SCRATCH_BASE     0x30000
4020 #define RX_CPU_SCRATCH_SIZE     0x04000
4021 #define TX_CPU_SCRATCH_BASE     0x34000
4022 #define TX_CPU_SCRATCH_SIZE     0x04000
4023
4024 /* tp->lock is held. */
4025 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4026 {
4027         int i;
4028
4029         if (offset == TX_CPU_BASE &&
4030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4031                 BUG();
4032
4033         if (offset == RX_CPU_BASE) {
4034                 for (i = 0; i < 10000; i++) {
4035                         tw32(offset + CPU_STATE, 0xffffffff);
4036                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4037                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4038                                 break;
4039                 }
4040
4041                 tw32(offset + CPU_STATE, 0xffffffff);
4042                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4043                 udelay(10);
4044         } else {
4045                 for (i = 0; i < 10000; i++) {
4046                         tw32(offset + CPU_STATE, 0xffffffff);
4047                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4048                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4049                                 break;
4050                 }
4051         }
4052
4053         if (i >= 10000) {
4054                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4055                        "and %s CPU\n",
4056                        tp->dev->name,
4057                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4058                 return -ENODEV;
4059         }
4060         return 0;
4061 }
4062
4063 struct fw_info {
4064         unsigned int text_base;
4065         unsigned int text_len;
4066         u32 *text_data;
4067         unsigned int rodata_base;
4068         unsigned int rodata_len;
4069         u32 *rodata_data;
4070         unsigned int data_base;
4071         unsigned int data_len;
4072         u32 *data_data;
4073 };
4074
4075 /* tp->lock is held. */
4076 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4077                                  int cpu_scratch_size, struct fw_info *info)
4078 {
4079         int err, i;
4080         u32 orig_tg3_flags = tp->tg3_flags;
4081         void (*write_op)(struct tg3 *, u32, u32);
4082
4083         if (cpu_base == TX_CPU_BASE &&
4084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4085                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4086                        "TX CPU firmware on %s, which is a 5705.\n",
4087                        tp->dev->name);
4088                 return -EINVAL;
4089         }
4090
4091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
4092                 write_op = tg3_write_mem;
4093         else
4094                 write_op = tg3_write_indirect_reg32;
4095
4096         /* Force use of PCI config space for indirect register
4097          * write calls.
4098          */
4099         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4100
4101         err = tg3_halt_cpu(tp, cpu_base);
4102         if (err)
4103                 goto out;
4104
4105         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4106                 write_op(tp, cpu_scratch_base + i, 0);
4107         tw32(cpu_base + CPU_STATE, 0xffffffff);
4108         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4109         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4110                 write_op(tp, (cpu_scratch_base +
4111                               (info->text_base & 0xffff) +
4112                               (i * sizeof(u32))),
4113                          (info->text_data ?
4114                           info->text_data[i] : 0));
4115         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4116                 write_op(tp, (cpu_scratch_base +
4117                               (info->rodata_base & 0xffff) +
4118                               (i * sizeof(u32))),
4119                          (info->rodata_data ?
4120                           info->rodata_data[i] : 0));
4121         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4122                 write_op(tp, (cpu_scratch_base +
4123                               (info->data_base & 0xffff) +
4124                               (i * sizeof(u32))),
4125                          (info->data_data ?
4126                           info->data_data[i] : 0));
4127
4128         err = 0;
4129
4130 out:
4131         tp->tg3_flags = orig_tg3_flags;
4132         return err;
4133 }
4134
4135 /* tp->lock is held. */
4136 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4137 {
4138         struct fw_info info;
4139         int err, i;
4140
4141         info.text_base = TG3_FW_TEXT_ADDR;
4142         info.text_len = TG3_FW_TEXT_LEN;
4143         info.text_data = &tg3FwText[0];
4144         info.rodata_base = TG3_FW_RODATA_ADDR;
4145         info.rodata_len = TG3_FW_RODATA_LEN;
4146         info.rodata_data = &tg3FwRodata[0];
4147         info.data_base = TG3_FW_DATA_ADDR;
4148         info.data_len = TG3_FW_DATA_LEN;
4149         info.data_data = NULL;
4150
4151         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4152                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4153                                     &info);
4154         if (err)
4155                 return err;
4156
4157         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4158                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4159                                     &info);
4160         if (err)
4161                 return err;
4162
4163         /* Now start up only the RX CPU. */
4164         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4165         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4166
4167         for (i = 0; i < 5; i++) {
4168                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4169                         break;
4170                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4171                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4172                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4173                 udelay(1000);
4174         }
4175         if (i >= 5) {
4176                 printk(KERN_ERR PFX "tg3_load_firmware failed to set "
4177                        "RX CPU PC for %s: is %08x, should be %08x\n",
4178                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4179                        TG3_FW_TEXT_ADDR);
4180                 return -ENODEV;
4181         }
4182         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4183         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4184
4185         return 0;
4186 }
4187
4188 #if TG3_TSO_SUPPORT != 0
4189
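/* Firmware image implementing TSO, loaded onto the TX CPU by
 * tg3_load_tso_firmware(); 5705-class chips use the separate TSO5
 * image further below instead.
 */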
4190 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4191 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4192 #define TG3_TSO_FW_RELEASE_FIX          0x0
4193 #define TG3_TSO_FW_START_ADDR           0x08000000
4194 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4195 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4196 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4197 #define TG3_TSO_FW_RODATA_LEN           0x60
4198 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4199 #define TG3_TSO_FW_DATA_LEN             0x30
4200 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4201 #define TG3_TSO_FW_SBSS_LEN             0x2c
4202 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4203 #define TG3_TSO_FW_BSS_LEN              0x894
4204
4205 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4206         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4207         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4208         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4209         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4210         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4211         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4212         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4213         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4214         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4215         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4216         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4217         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4218         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4219         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4220         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4221         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4222         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4223         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4224         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4225         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4226         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4227         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4228         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4229         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4230         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4231         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4232         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4233         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4234         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4235         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4236         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4237         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4238         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4239         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4240         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4241         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4242         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4243         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4244         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4245         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4246         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4247         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4248         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4249         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4250         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4251         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4252         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4253         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4254         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4255         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4256         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4257         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4258         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4259         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4260         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4261         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4262         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4263         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4264         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4265         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4266         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4267         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4268         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4269         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4270         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4271         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4272         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4273         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4274         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4275         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4276         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4277         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4278         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4279         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4280         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4281         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4282         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4283         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4284         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4285         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4286         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4287         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4288         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4289         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4290         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4291         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4292         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4293         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4294         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4295         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4296         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4297         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4298         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4299         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4300         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4301         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4302         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4303         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4304         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4305         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4306         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4307         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4308         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4309         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4310         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4311         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4312         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4313         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4314         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4315         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4316         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4317         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4318         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4319         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4320         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4321         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4322         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4323         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4324         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4325         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4326         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4327         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4328         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4329         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4330         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4331         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4332         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4333         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4334         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4335         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4336         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4337         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4338         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4339         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4340         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4341         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4342         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4343         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4344         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4345         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4346         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4347         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4348         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4349         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4350         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4351         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4352         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4353         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4354         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4355         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4356         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4357         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4358         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4359         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4360         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4361         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4362         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4363         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4364         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4365         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4366         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4367         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4368         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4369         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4370         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4371         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4372         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4373         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4374         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4375         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4376         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4377         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4378         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4379         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4380         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4381         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4382         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4383         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4384         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4385         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4386         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4387         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4388         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4389         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4390         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4391         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4392         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4393         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4394         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4395         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4396         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4397         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4398         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4399         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4400         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4401         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4402         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4403         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4404         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4405         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4406         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4407         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4408         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4409         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4410         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4411         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4412         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4413         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4414         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4415         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4416         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4417         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4418         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4419         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4420         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4421         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4422         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4423         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4424         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4425         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4426         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4427         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4428         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4429         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4430         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4431         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4432         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4433         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4434         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4435         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4436         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4437         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4438         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4439         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4440         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4441         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4442         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4443         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4444         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4445         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4446         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4447         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4448         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4449         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4450         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4451         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4452         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4453         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4454         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4455         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4456         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4457         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4458         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4459         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4460         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4461         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4462         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4463         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4464         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4465         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4466         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4467         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4468         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4469         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4470         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4471         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4472         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4473         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4474         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4475         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4476         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4477         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4478         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4479         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4480         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4481         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4482         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4483         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4484         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4485         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4486         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4487         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4488         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4489         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4490 };
4491
4492 static u32 tg3TsoFwRodata[(TG3_TSO_FW_RODATA_LEN / 4) + 1] = {
4493         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4494         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4495         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4496         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4497         0x00000000,
4498 };
4499
4500 static u32 tg3TsoFwData[(TG3_TSO_FW_DATA_LEN / 4) + 1] = {
4501         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4502         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4503         0x00000000,
4504 };
4505
4506 /* 5705 needs a special version of the TSO firmware.  */
4507 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4508 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4509 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4510 #define TG3_TSO5_FW_START_ADDR          0x00010000
4511 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4512 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4513 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4514 #define TG3_TSO5_FW_RODATA_LEN          0x50
4515 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4516 #define TG3_TSO5_FW_DATA_LEN            0x20
4517 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4518 #define TG3_TSO5_FW_SBSS_LEN            0x28
4519 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4520 #define TG3_TSO5_FW_BSS_LEN             0x88
4521
4522 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4523         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4524         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4525         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4526         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4527         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4528         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4529         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4530         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4531         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4532         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4533         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4534         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4535         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4536         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4537         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4538         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4539         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4540         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4541         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4542         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4543         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4544         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4545         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4546         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4547         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4548         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4549         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4550         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4551         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4552         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4553         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4554         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4555         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4556         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4557         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4558         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4559         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4560         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4561         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4562         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4563         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4564         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4565         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4566         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4567         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4568         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4569         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4570         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4571         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4572         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4573         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4574         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4575         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4576         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4577         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4578         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4579         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4580         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4581         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4582         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4583         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4584         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4585         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4586         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4587         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4588         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4589         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4590         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4591         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4592         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4593         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4594         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4595         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4596         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4597         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4598         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4599         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4600         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4601         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4602         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4603         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4604         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4605         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4606         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4607         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4608         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4609         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4610         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4611         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4612         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4613         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4614         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4615         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4616         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4617         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4618         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4619         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4620         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4621         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4622         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4623         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4624         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4625         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4626         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4627         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4628         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4629         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4630         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4631         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4632         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4633         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4634         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4635         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4636         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4637         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4638         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4639         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4640         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4641         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4642         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4643         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4644         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4645         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4646         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4647         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4648         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4649         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4650         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4651         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4652         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4653         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4654         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4655         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4656         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4657         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4658         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4659         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4660         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4661         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4662         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4663         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4664         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4665         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4666         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4667         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4668         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4669         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4670         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4671         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4672         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4673         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4674         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4675         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4676         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4677         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4678         0x00000000, 0x00000000, 0x00000000,
4679 };
4680
4681 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4682         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4683         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4684         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4685         0x00000000, 0x00000000, 0x00000000,
4686 };
4687
4688 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4689         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4690         0x00000000, 0x00000000, 0x00000000,
4691 };
4692
4693 /* tp->lock is held. */
4694 static int tg3_load_tso_firmware(struct tg3 *tp)
4695 {
4696         struct fw_info info;
4697         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4698         int err, i;
4699
4700         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
4701                 return 0;
4702
4703         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4704                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4705                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4706                 info.text_data = &tg3Tso5FwText[0];
4707                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4708                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4709                 info.rodata_data = &tg3Tso5FwRodata[0];
4710                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4711                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4712                 info.data_data = &tg3Tso5FwData[0];
4713                 cpu_base = RX_CPU_BASE;
4714                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4715                 cpu_scratch_size = (info.text_len +
4716                                     info.rodata_len +
4717                                     info.data_len +
4718                                     TG3_TSO5_FW_SBSS_LEN +
4719                                     TG3_TSO5_FW_BSS_LEN);
4720         } else {
4721                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4722                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4723                 info.text_data = &tg3TsoFwText[0];
4724                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4725                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4726                 info.rodata_data = &tg3TsoFwRodata[0];
4727                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4728                 info.data_len = TG3_TSO_FW_DATA_LEN;
4729                 info.data_data = &tg3TsoFwData[0];
4730                 cpu_base = TX_CPU_BASE;
4731                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4732                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4733         }
4734
4735         err = tg3_load_firmware_cpu(tp, cpu_base,
4736                                     cpu_scratch_base, cpu_scratch_size,
4737                                     &info);
4738         if (err)
4739                 return err;
4740
4741         /* Now start up the CPU. */
4742         tw32(cpu_base + CPU_STATE, 0xffffffff);
4743         tw32_f(cpu_base + CPU_PC,    info.text_base);
4744
4745         for (i = 0; i < 5; i++) {
4746                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4747                         break;
4748                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4749                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4750                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4751                 udelay(1000);
4752         }
4753         if (i >= 5) {
4754                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
4755                        "CPU PC for %s: is %08x, should be %08x\n",
4756                        tp->dev->name, tr32(cpu_base + CPU_PC),
4757                        info.text_base);
4758                 return -ENODEV;
4759         }
4760         tw32(cpu_base + CPU_STATE, 0xffffffff);
4761         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4762         return 0;
4763 }
4764
4765 #endif /* TG3_TSO_SUPPORT != 0 */
4766
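/* Program the station address into every MAC address slot the chip
 * provides (four primary slots, plus twelve extended slots on chips
 * other than the 5700/5701/5705) and write the masked byte sum of the
 * address as the TX backoff seed.
 */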
4767 /* tp->lock is held. */
4768 static void __tg3_set_mac_addr(struct tg3 *tp)
4769 {
4770         u32 addr_high, addr_low;
4771         int i;
4772
4773         addr_high = ((tp->dev->dev_addr[0] << 8) |
4774                      tp->dev->dev_addr[1]);
4775         addr_low = ((tp->dev->dev_addr[2] << 24) |
4776                     (tp->dev->dev_addr[3] << 16) |
4777                     (tp->dev->dev_addr[4] <<  8) |
4778                     (tp->dev->dev_addr[5] <<  0));
4779         for (i = 0; i < 4; i++) {
4780                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4781                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4782         }
4783
4784         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4785             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4786             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4787                 for (i = 0; i < 12; i++) {
4788                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4789                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4790                 }
4791         }
4792
4793         addr_high = (tp->dev->dev_addr[0] +
4794                      tp->dev->dev_addr[1] +
4795                      tp->dev->dev_addr[2] +
4796                      tp->dev->dev_addr[3] +
4797                      tp->dev->dev_addr[4] +
4798                      tp->dev->dev_addr[5]) &
4799                 TX_BACKOFF_SEED_MASK;
4800         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4801 }
4802
4803 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4804 {
4805         struct tg3 *tp = netdev_priv(dev);
4806         struct sockaddr *addr = p;
4807
4808         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4809
4810         spin_lock_irq(&tp->lock);
4811         __tg3_set_mac_addr(tp);
4812         spin_unlock_irq(&tp->lock);
4813
4814         return 0;
4815 }
4816
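/* Write one TG3_BDINFO block into NIC SRAM at @bdinfo_addr: the 64-bit
 * host DMA address of the ring, its maxlen/flags word and, on chips
 * that support it, the NIC-memory address of the descriptors.
 */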
4817 /* tp->lock is held. */
4818 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4819                            dma_addr_t mapping, u32 maxlen_flags,
4820                            u32 nic_addr)
4821 {
4822         tg3_write_mem(tp,
4823                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4824                       ((u64) mapping >> 32));
4825         tg3_write_mem(tp,
4826                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4827                       ((u64) mapping & 0xffffffff));
4828         tg3_write_mem(tp,
4829                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4830                        maxlen_flags);
4831
4832         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4833                 tg3_write_mem(tp,
4834                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4835                               nic_addr);
4836 }
4837
4838 static void __tg3_set_rx_mode(struct net_device *);
4839
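/* Bring the chip to a fully initialized state: stop the on-chip
 * firmware, reset the core, then reprogram clocks, the buffer manager
 * and its watermarks, and the receive descriptor rings.
 */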
4840 /* tp->lock is held. */
4841 static int tg3_reset_hw(struct tg3 *tp)
4842 {
4843         u32 val, rdmac_mode;
4844         int i, err, limit;
4845
4846         tg3_disable_ints(tp);
4847
4848         tg3_stop_fw(tp);
4849
4850         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4851
4852         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4853                 err = tg3_abort_hw(tp);
4854                 if (err)
4855                         return err;
4856         }
4857
4858         err = tg3_chip_reset(tp);
4859         if (err)
4860                 return err;
4861
4862         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4863
4864         /* This works around an issue with Athlon chipsets on
4865          * B3 tigon3 silicon.  This bit has no effect on any
4866          * other revision.  But do not set this on PCI Express
4867          * chips.
4868          */
4869         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4870                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4871         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4872
4873         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4874             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4875                 val = tr32(TG3PCI_PCISTATE);
4876                 val |= PCISTATE_RETRY_SAME_DMA;
4877                 tw32(TG3PCI_PCISTATE, val);
4878         }
4879
4880         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4881                 /* Enable some hw fixes.  */
4882                 val = tr32(TG3PCI_MSI_DATA);
4883                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4884                 tw32(TG3PCI_MSI_DATA, val);
4885         }
4886
4887         /* Descriptor ring init may access the
4888          * NIC SRAM area to set up the TX descriptors, so we
4889          * can only do this after the hardware has been
4890          * successfully reset.
4891          */
4892         tg3_init_rings(tp);
4893
4894         /* This value is determined during the probe time DMA
4895          * engine test, tg3_test_dma.
4896          */
4897         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4898
4899         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4900                           GRC_MODE_4X_NIC_SEND_RINGS |
4901                           GRC_MODE_NO_TX_PHDR_CSUM |
4902                           GRC_MODE_NO_RX_PHDR_CSUM);
4903         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
4904                 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4905         else
4906                 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
4907         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4908                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4909         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4910                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4911
4912         tw32(GRC_MODE,
4913              tp->grc_mode |
4914              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4915
4916         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
4917         val = tr32(GRC_MISC_CFG);
4918         val &= ~0xff;
4919         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4920         tw32(GRC_MISC_CFG, val);
4921
4922         /* Initialize MBUF/DESC pool. */
4923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
4924                 /* Do nothing.  */
4925         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4926                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4927                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4928                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4929                 else
4930                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4931                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4932                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4933         }
4934 #if TG3_TSO_SUPPORT != 0
4935         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4936                 int fw_len;
4937
4938                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4939                           TG3_TSO5_FW_RODATA_LEN +
4940                           TG3_TSO5_FW_DATA_LEN +
4941                           TG3_TSO5_FW_SBSS_LEN +
4942                           TG3_TSO5_FW_BSS_LEN);
4943                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);   /* round up to 128 bytes */
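                /* The 5705 TSO firmware is staged at the start of the MBUF
                 * pool (see tg3_load_tso_firmware()), so the pool handed to
                 * the buffer manager must begin after the firmware image.
                 */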
4944                 tw32(BUFMGR_MB_POOL_ADDR,
4945                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4946                 tw32(BUFMGR_MB_POOL_SIZE,
4947                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4948         }
4949 #endif
4950
4951         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4952                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4953                      tp->bufmgr_config.mbuf_read_dma_low_water);
4954                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4955                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4956                 tw32(BUFMGR_MB_HIGH_WATER,
4957                      tp->bufmgr_config.mbuf_high_water);
4958         } else {
4959                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4960                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4961                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4962                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4963                 tw32(BUFMGR_MB_HIGH_WATER,
4964                      tp->bufmgr_config.mbuf_high_water_jumbo);
4965         }
4966         tw32(BUFMGR_DMA_LOW_WATER,
4967              tp->bufmgr_config.dma_low_water);
4968         tw32(BUFMGR_DMA_HIGH_WATER,
4969              tp->bufmgr_config.dma_high_water);
4970
4971         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4972         for (i = 0; i < 2000; i++) {
4973                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4974                         break;
4975                 udelay(10);
4976         }
4977         if (i >= 2000) {
4978                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4979                        tp->dev->name);
4980                 return -ENODEV;
4981         }
4982
4983         /* Setup replenish threshold. */
4984         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4985
4986         /* Initialize TG3_BDINFO's at:
4987          *  RCVDBDI_STD_BD:     standard eth size rx ring
4988          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
4989          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
4990          *
4991          * like so:
4992          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
4993          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
4994          *                              ring attribute flags
4995          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
4996          *
4997          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
4998          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
4999          *
5000          * The size of each ring is fixed in the firmware, but the location is
5001          * configurable.
5002          */
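        /* Illustrative example only: a 1536-byte max RX buffer size with no
         * attribute flags would be written to TG3_BDINFO_MAXLEN_FLAGS as
         * (1536 << 16) == 0x06000000.
         */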
5003         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5004              ((u64) tp->rx_std_mapping >> 32));
5005         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5006              ((u64) tp->rx_std_mapping & 0xffffffff));
5007         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5008              NIC_SRAM_RX_BUFFER_DESC);
5009
5010         /* Don't even try to program the JUMBO/MINI buffer descriptor
5011          * configs on 5705 and 5750.
5012          */
5013         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5014             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5015                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5016                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5017         } else {
5018                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5019                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5020
5021                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5022                      BDINFO_FLAGS_DISABLED);
5023
5024                 /* Setup replenish threshold. */
5025                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5026
5027                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5028                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5029                              ((u64) tp->rx_jumbo_mapping >> 32));
5030                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5031                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5032                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5033                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5034                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5035                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5036                 } else {
5037                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5038                              BDINFO_FLAGS_DISABLED);
5039                 }
5040
5041         }
5042
5043         /* There is only one send ring on 5705/5750; no need to explicitly
5044          * disable the others.
5045          */
5046         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5047             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5048                 /* Clear out send RCB ring in SRAM. */
5049                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5050                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5051                                       BDINFO_FLAGS_DISABLED);
5052         }
5053
5054         tp->tx_prod = 0;
5055         tp->tx_cons = 0;
5056         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5057         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5058
5059         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
5060                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5061                                tp->tx_desc_mapping,
5062                                (TG3_TX_RING_SIZE <<
5063                                 BDINFO_FLAGS_MAXLEN_SHIFT),
5064                                NIC_SRAM_TX_BUFFER_DESC);
5065         } else {
5066                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5067                                0,
5068                                BDINFO_FLAGS_DISABLED,
5069                                NIC_SRAM_TX_BUFFER_DESC);
5070         }
5071
5072         /* There is only one receive return ring on 5705/5750; no need
5073          * to explicitly disable the others.
5074          */
5075         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5076             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5077                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5078                      i += TG3_BDINFO_SIZE) {
5079                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5080                                       BDINFO_FLAGS_DISABLED);
5081                 }
5082         }
5083
5084         tp->rx_rcb_ptr = 0;
5085         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5086
5087         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5088                        tp->rx_rcb_mapping,
5089                        (TG3_RX_RCB_RING_SIZE(tp) <<
5090                         BDINFO_FLAGS_MAXLEN_SHIFT),
5091                        0);
5092
5093         tp->rx_std_ptr = tp->rx_pending;
5094         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5095                      tp->rx_std_ptr);
5096
5097         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5098                                                 tp->rx_jumbo_pending : 0;
5099         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5100                      tp->rx_jumbo_ptr);
5101
5102         /* Initialize MAC address and backoff seed. */
5103         __tg3_set_mac_addr(tp);
5104
5105         /* MTU + ethernet header (14) + FCS (4) + optional VLAN tag (4) */
5106         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5107
5108         /* The slot time is changed by tg3_setup_phy if we
5109          * run at gigabit with half duplex.
5110          */
5111         tw32(MAC_TX_LENGTHS,
5112              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5113              (6 << TX_LENGTHS_IPG_SHIFT) |
5114              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5115
5116         /* Receive rules. */
5117         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5118         tw32(RCVLPC_CONFIG, 0x0181);
5119
5120         /* Calculate the RDMAC_MODE setting early; we need it to determine
5121          * the RCVLPC_STATS_ENABLE mask.
5122          */
5123         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5124                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5125                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5126                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5127                       RDMAC_MODE_LNGREAD_ENAB);
5128         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5129                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5130         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5131              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5132             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5133                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5134                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5135                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5136                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5137                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5138                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5139                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5140                 }
5141         }
5142
5143 #if TG3_TSO_SUPPORT != 0
5144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5145                 rdmac_mode |= (1 << 27);
5146 #endif
5147
5148         /* Receive/send statistics. */
5149         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5150             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5151                 val = tr32(RCVLPC_STATS_ENABLE);
5152                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5153                 tw32(RCVLPC_STATS_ENABLE, val);
5154         } else {
5155                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5156         }
5157         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5158         tw32(SNDDATAI_STATSENAB, 0xffffff);
5159         tw32(SNDDATAI_STATSCTRL,
5160              (SNDDATAI_SCTRL_ENABLE |
5161               SNDDATAI_SCTRL_FASTUPD));
5162
5163         /* Setup host coalescing engine. */
5164         tw32(HOSTCC_MODE, 0);
5165         for (i = 0; i < 2000; i++) {
5166                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5167                         break;
5168                 udelay(10);
5169         }
5170
5171         tw32(HOSTCC_RXCOL_TICKS, 0);
5172         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5173         tw32(HOSTCC_RXMAX_FRAMES, 1);
5174         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5175         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5176             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5177                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5178                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5179         }
5180         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5181         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5182
5183         /* set status block DMA address */
5184         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5185              ((u64) tp->status_mapping >> 32));
5186         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5187              ((u64) tp->status_mapping & 0xffffffff));
5188
5189         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5190             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5191                 /* Status/statistics block address.  See tg3_timer,
5192                  * the tg3_periodic_fetch_stats call there, and
5193                  * tg3_get_stats to see how this works for 5705/5750 chips.
5194                  */
5195                 tw32(HOSTCC_STAT_COAL_TICKS,
5196                      DEFAULT_STAT_COAL_TICKS);
5197                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5198                      ((u64) tp->stats_mapping >> 32));
5199                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5200                      ((u64) tp->stats_mapping & 0xffffffff));
5201                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5202                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5203         }
5204
5205         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5206
5207         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5208         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5209         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5210             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5211                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5212
5213         /* Clear statistics/status block in chip, and status block in ram. */
5214         for (i = NIC_SRAM_STATS_BLK;
5215              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5216              i += sizeof(u32)) {
5217                 tg3_write_mem(tp, i, 0);
5218                 udelay(40);
5219         }
5220         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5221
5222         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5223                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5224         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5225         udelay(40);
5226
5227         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5229                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5230                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5231         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5232         udelay(100);
5233
5234         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5235         tr32(MAILBOX_INTERRUPT_0);
5236
5237         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5238             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
5239                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5240                 udelay(40);
5241         }
5242
5243         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5244                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5245                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5246                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5247                WDMAC_MODE_LNGREAD_ENAB);
5248
5249         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5250              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5252                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5253                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5254                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5255                         /* nothing */
5256                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5257                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5258                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5259                         val |= WDMAC_MODE_RX_ACCEL;
5260                 }
5261         }
5262
5263         tw32_f(WDMAC_MODE, val);
5264         udelay(40);
5265
5266         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5267                 val = tr32(TG3PCI_X_CAPS);
5268                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5269                         val &= ~PCIX_CAPS_BURST_MASK;
5270                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5271                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5272                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5273                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5274                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5275                                 val |= (tp->split_mode_max_reqs <<
5276                                         PCIX_CAPS_SPLIT_SHIFT);
5277                 }
5278                 tw32(TG3PCI_X_CAPS, val);
5279         }
5280
5281         tw32_f(RDMAC_MODE, rdmac_mode);
5282         udelay(40);
5283
5284         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5285         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5286             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
5287                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5288         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5289         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5290         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5291         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5292         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5293 #if TG3_TSO_SUPPORT != 0
5294         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5295                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5296 #endif
5297         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5298         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5299
5300         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5301                 err = tg3_load_5701_a0_firmware_fix(tp);
5302                 if (err)
5303                         return err;
5304         }
5305
5306 #if TG3_TSO_SUPPORT != 0
5307         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5308                 err = tg3_load_tso_firmware(tp);
5309                 if (err)
5310                         return err;
5311         }
5312 #endif
5313
5314         tp->tx_mode = TX_MODE_ENABLE;
5315         tw32_f(MAC_TX_MODE, tp->tx_mode);
5316         udelay(100);
5317
5318         tp->rx_mode = RX_MODE_ENABLE;
5319         tw32_f(MAC_RX_MODE, tp->rx_mode);
5320         udelay(10);
5321
5322         if (tp->link_config.phy_is_low_power) {
5323                 tp->link_config.phy_is_low_power = 0;
5324                 tp->link_config.speed = tp->link_config.orig_speed;
5325                 tp->link_config.duplex = tp->link_config.orig_duplex;
5326                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5327         }
5328
5329         tp->mi_mode = MAC_MI_MODE_BASE;
5330         tw32_f(MAC_MI_MODE, tp->mi_mode);
5331         udelay(80);
5332
5333         tw32(MAC_LED_CTRL, tp->led_ctrl);
5334
5335         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5336         if (tp->phy_id == PHY_ID_SERDES) {
5337                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5338                 udelay(10);
5339         }
5340         tw32_f(MAC_RX_MODE, tp->rx_mode);
5341         udelay(10);
5342
5343         if (tp->phy_id == PHY_ID_SERDES) {
5344                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5345                         /* Set drive transmission level to 1.2V  */
5346                         val = tr32(MAC_SERDES_CFG);
5347                         val &= 0xfffff000;
5348                         val |= 0x880;
5349                         tw32(MAC_SERDES_CFG, val);
5350                 }
5351                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5352                         tw32(MAC_SERDES_CFG, 0x616000);
5353         }
5354
5355         /* Prevent chip from dropping frames when flow control
5356          * is enabled.
5357          */
5358         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5359
5360         err = tg3_setup_phy(tp, 1);
5361         if (err)
5362                 return err;
5363
5364         if (tp->phy_id != PHY_ID_SERDES) {
5365                 u32 tmp;
5366
5367                 /* Clear CRC stats. */
5368                 tg3_readphy(tp, 0x1e, &tmp);
5369                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5370                 tg3_readphy(tp, 0x14, &tmp);
5371         }
5372
5373         __tg3_set_rx_mode(tp->dev);
5374
5375         /* Initialize receive rules. */
5376         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5377         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5378         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5379         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5380
5381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5383                 limit = 8;
5384         else
5385                 limit = 16;
5386         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5387                 limit -= 4;
5388         switch (limit) {
5389         case 16:
5390                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5391         case 15:
5392                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5393         case 14:
5394                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5395         case 13:
5396                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5397         case 12:
5398                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5399         case 11:
5400                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5401         case 10:
5402                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5403         case 9:
5404                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5405         case 8:
5406                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5407         case 7:
5408                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5409         case 6:
5410                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5411         case 5:
5412                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5413         case 4:
5414                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5415         case 3:
5416                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5417         case 2:
5418         case 1:
5419
5420         default:
5421                 break;
5422         }
5423
5424         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5425
5426         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5427                 tg3_enable_ints(tp);
5428
5429         return 0;
5430 }
5431
5432 /* Called at device open time to get the chip ready for
5433  * packet processing.  Invoked with tp->lock held.
5434  */
5435 static int tg3_init_hw(struct tg3 *tp)
5436 {
5437         int err;
5438
5439         /* Force the chip into D0. */
5440         err = tg3_set_power_state(tp, 0);
5441         if (err)
5442                 goto out;
5443
5444         tg3_switch_clocks(tp);
5445
5446         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5447
5448         err = tg3_reset_hw(tp);
5449
5450 out:
5451         return err;
5452 }
5453
5454 #define TG3_STAT_ADD32(PSTAT, REG) \
5455 do {    u32 __val = tr32(REG); \
5456         (PSTAT)->low += __val; \
5457         if ((PSTAT)->low < __val) \
5458                 (PSTAT)->high += 1; \
5459 } while (0)
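/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software
 * counter: the register value is added to the low word, and if the low
 * word wrapped around (became smaller than the value just added), a
 * carry is propagated into the high word.
 */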
5460
5461 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5462 {
5463         struct tg3_hw_stats *sp = tp->hw_stats;
5464
5465         if (!netif_carrier_ok(tp->dev))
5466                 return;
5467
5468         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5469         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5470         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5471         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5472         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5473         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5474         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5475         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5476         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5477         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5478         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5479         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5480         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5481
5482         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5483         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5484         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5485         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5486         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5487         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5488         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5489         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5490         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5491         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5492         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5493         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5494         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5495         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5496 }
5497
5498 static void tg3_timer(unsigned long __opaque)
5499 {
5500         struct tg3 *tp = (struct tg3 *) __opaque;
5501         unsigned long flags;
5502
5503         spin_lock_irqsave(&tp->lock, flags);
5504         spin_lock(&tp->tx_lock);
5505
5506         /* All of this garbage is needed because, when using non-tagged
5507          * IRQ status, the mailbox/status_block protocol the chip
5508          * uses with the CPU is race prone.
5509          */
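        /* If the status block claims an update is pending, assert an
         * interrupt (GRC_LCLCTRL_SETINT) so the handler services it;
         * otherwise kick the coalescing engine (HOSTCC_MODE_NOW) so the
         * chip DMAs a fresh status block immediately.
         */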
5510         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5511                 tw32(GRC_LOCAL_CTRL,
5512                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5513         } else {
5514                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5515                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5516         }
5517
5518         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5519                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5520                 spin_unlock(&tp->tx_lock);
5521                 spin_unlock_irqrestore(&tp->lock, flags);
5522                 schedule_work(&tp->reset_task);
5523                 return;
5524         }
5525
5526         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
5527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
5528                 tg3_periodic_fetch_stats(tp);
5529
5530         /* This part only runs once per second. */
5531         if (!--tp->timer_counter) {
5532                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5533                         u32 mac_stat;
5534                         int phy_event;
5535
5536                         mac_stat = tr32(MAC_STATUS);
5537
5538                         phy_event = 0;
5539                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5540                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5541                                         phy_event = 1;
5542                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5543                                 phy_event = 1;
5544
5545                         if (phy_event)
5546                                 tg3_setup_phy(tp, 0);
5547                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5548                         u32 mac_stat = tr32(MAC_STATUS);
5549                         int need_setup = 0;
5550
5551                         if (netif_carrier_ok(tp->dev) &&
5552                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5553                                 need_setup = 1;
5554                         }
5555                         if (!netif_carrier_ok(tp->dev) &&
5556                             (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5557                                 need_setup = 1;
5558                         }
5559                         if (need_setup) {
5560                                 tw32_f(MAC_MODE,
5561                                      (tp->mac_mode &
5562                                       ~MAC_MODE_PORT_MODE_MASK));
5563                                 udelay(40);
5564                                 tw32_f(MAC_MODE, tp->mac_mode);
5565                                 udelay(40);
5566                                 tg3_setup_phy(tp, 0);
5567                         }
5568                 }
5569
5570                 tp->timer_counter = tp->timer_multiplier;
5571         }
5572
5573         /* Heartbeat is only sent once every 120 seconds.  */
5574         if (!--tp->asf_counter) {
5575                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5576                         u32 val;
5577
5578                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5579                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5580                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5581                         val = tr32(GRC_RX_CPU_EVENT);
5582                         val |= (1 << 14);
5583                         tw32(GRC_RX_CPU_EVENT, val);
5584                 }
5585                 tp->asf_counter = tp->asf_multiplier;
5586         }
5587
5588         spin_unlock(&tp->tx_lock);
5589         spin_unlock_irqrestore(&tp->lock, flags);
5590
5591         tp->timer.expires = jiffies + tp->timer_offset;
5592         add_timer(&tp->timer);
5593 }
5594
5595 static int tg3_open(struct net_device *dev)
5596 {
5597         struct tg3 *tp = netdev_priv(dev);
5598         int err;
5599
5600         spin_lock_irq(&tp->lock);
5601         spin_lock(&tp->tx_lock);
5602
5603         tg3_disable_ints(tp);
5604         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5605
5606         spin_unlock(&tp->tx_lock);
5607         spin_unlock_irq(&tp->lock);
5608
5609         /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5610          * tp->tg3_flags is accurate at that new place.
5611          */
5612         err = tg3_alloc_consistent(tp);
5613         if (err)
5614                 return err;
5615
5616         err = request_irq(dev->irq, tg3_interrupt,
5617                           SA_SHIRQ, dev->name, dev);
5618
5619         if (err) {
5620                 tg3_free_consistent(tp);
5621                 return err;
5622         }
5623
5624         spin_lock_irq(&tp->lock);
5625         spin_lock(&tp->tx_lock);
5626
5627         err = tg3_init_hw(tp);
5628         if (err) {
5629                 tg3_halt(tp);
5630                 tg3_free_rings(tp);
5631         } else {
5632                 tp->timer_offset = HZ / 10;
5633                 tp->timer_counter = tp->timer_multiplier = 10;
5634                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
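                /* The timer fires every HZ/10 jiffies (100 ms): a counter
                 * of 10 yields the once-per-second work in tg3_timer, and
                 * 10 * 120 = 1200 ticks yields the 120-second ASF
                 * heartbeat interval.
                 */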
5635
5636                 init_timer(&tp->timer);
5637                 tp->timer.expires = jiffies + tp->timer_offset;
5638                 tp->timer.data = (unsigned long) tp;
5639                 tp->timer.function = tg3_timer;
5640                 add_timer(&tp->timer);
5641
5642                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5643         }
5644
5645         spin_unlock(&tp->tx_lock);
5646         spin_unlock_irq(&tp->lock);
5647
5648         if (err) {
5649                 free_irq(dev->irq, dev);
5650                 tg3_free_consistent(tp);
5651                 return err;
5652         }
5653
5654         spin_lock_irq(&tp->lock);
5655         spin_lock(&tp->tx_lock);
5656
5657         tg3_enable_ints(tp);
5658
5659         spin_unlock(&tp->tx_lock);
5660         spin_unlock_irq(&tp->lock);
5661
5662         netif_start_queue(dev);
5663
5664         return 0;
5665 }
5666
5667 #if 0
5668 /*static*/ void tg3_dump_state(struct tg3 *tp)
5669 {
5670         u32 val32, val32_2, val32_3, val32_4, val32_5;
5671         u16 val16;
5672         int i;
5673
5674         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5675         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5676         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5677                val16, val32);
5678
5679         /* MAC block */
5680         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5681                tr32(MAC_MODE), tr32(MAC_STATUS));
5682         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5683                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5684         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5685                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5686         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5687                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5688
5689         /* Send data initiator control block */
5690         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5691                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5692         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5693                tr32(SNDDATAI_STATSCTRL));
5694
5695         /* Send data completion control block */
5696         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5697
5698         /* Send BD ring selector block */
5699         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5700                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5701
5702         /* Send BD initiator control block */
5703         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5704                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5705
5706         /* Send BD completion control block */
5707         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5708
5709         /* Receive list placement control block */
5710         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5711                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5712         printk("       RCVLPC_STATSCTRL[%08x]\n",
5713                tr32(RCVLPC_STATSCTRL));
5714
5715         /* Receive data and receive BD initiator control block */
5716         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5717                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5718
5719         /* Receive data completion control block */
5720         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5721                tr32(RCVDCC_MODE));
5722
5723         /* Receive BD initiator control block */
5724         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5725                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5726
5727         /* Receive BD completion control block */
5728         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5729                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5730
5731         /* Receive list selector control block */
5732         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5733                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5734
5735         /* Mbuf cluster free block */
5736         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5737                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5738
5739         /* Host coalescing control block */
5740         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5741                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5742         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5743                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5744                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5745         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5746                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5747                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5748         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5749                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5750         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5751                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5752
5753         /* Memory arbiter control block */
5754         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5755                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5756
5757         /* Buffer manager control block */
5758         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5759                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5760         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5761                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5762         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5763                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5764                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5765                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5766
5767         /* Read DMA control block */
5768         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5769                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5770
5771         /* Write DMA control block */
5772         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5773                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5774
5775         /* DMA completion block */
5776         printk("DEBUG: DMAC_MODE[%08x]\n",
5777                tr32(DMAC_MODE));
5778
5779         /* GRC block */
5780         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5781                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5782         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5783                tr32(GRC_LOCAL_CTRL));
5784
5785         /* TG3_BDINFOs */
5786         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5787                tr32(RCVDBDI_JUMBO_BD + 0x0),
5788                tr32(RCVDBDI_JUMBO_BD + 0x4),
5789                tr32(RCVDBDI_JUMBO_BD + 0x8),
5790                tr32(RCVDBDI_JUMBO_BD + 0xc));
5791         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5792                tr32(RCVDBDI_STD_BD + 0x0),
5793                tr32(RCVDBDI_STD_BD + 0x4),
5794                tr32(RCVDBDI_STD_BD + 0x8),
5795                tr32(RCVDBDI_STD_BD + 0xc));
5796         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5797                tr32(RCVDBDI_MINI_BD + 0x0),
5798                tr32(RCVDBDI_MINI_BD + 0x4),
5799                tr32(RCVDBDI_MINI_BD + 0x8),
5800                tr32(RCVDBDI_MINI_BD + 0xc));
5801
5802         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5803         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5804         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5805         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5806         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5807                val32, val32_2, val32_3, val32_4);
5808
5809         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5810         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5811         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5812         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5813         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5814                val32, val32_2, val32_3, val32_4);
5815
5816         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5817         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5818         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5819         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5820         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5821         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5822                val32, val32_2, val32_3, val32_4, val32_5);
5823
5824         /* SW status block */
5825         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5826                tp->hw_status->status,
5827                tp->hw_status->status_tag,
5828                tp->hw_status->rx_jumbo_consumer,
5829                tp->hw_status->rx_consumer,
5830                tp->hw_status->rx_mini_consumer,
5831                tp->hw_status->idx[0].rx_producer,
5832                tp->hw_status->idx[0].tx_consumer);
5833
5834         /* SW statistics block */
5835         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5836                ((u32 *)tp->hw_stats)[0],
5837                ((u32 *)tp->hw_stats)[1],
5838                ((u32 *)tp->hw_stats)[2],
5839                ((u32 *)tp->hw_stats)[3]);
5840
5841         /* Mailboxes */
5842         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5843                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5844                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5845                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5846                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5847
5848         /* NIC side send descriptors. */
5849         for (i = 0; i < 6; i++) {
5850                 unsigned long txd;
5851
5852                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5853                         + (i * sizeof(struct tg3_tx_buffer_desc));
5854                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5855                        i,
5856                        readl(txd + 0x0), readl(txd + 0x4),
5857                        readl(txd + 0x8), readl(txd + 0xc));
5858         }
5859
5860         /* NIC side RX descriptors. */
5861         for (i = 0; i < 6; i++) {
5862                 unsigned long rxd;
5863
5864                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5865                         + (i * sizeof(struct tg3_rx_buffer_desc));
5866                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5867                        i,
5868                        readl(rxd + 0x0), readl(rxd + 0x4),
5869                        readl(rxd + 0x8), readl(rxd + 0xc));
5870                 rxd += (4 * sizeof(u32));
5871                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5872                        i,
5873                        readl(rxd + 0x0), readl(rxd + 0x4),
5874                        readl(rxd + 0x8), readl(rxd + 0xc));
5875         }
5876
5877         for (i = 0; i < 6; i++) {
5878                 unsigned long rxd;
5879
5880                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5881                         + (i * sizeof(struct tg3_rx_buffer_desc));
5882                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5883                        i,
5884                        readl(rxd + 0x0), readl(rxd + 0x4),
5885                        readl(rxd + 0x8), readl(rxd + 0xc));
5886                 rxd += (4 * sizeof(u32));
5887                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5888                        i,
5889                        readl(rxd + 0x0), readl(rxd + 0x4),
5890                        readl(rxd + 0x8), readl(rxd + 0xc));
5891         }
5892 }
5893 #endif
5894
5895 static struct net_device_stats *tg3_get_stats(struct net_device *);
5896 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5897
5898 static int tg3_close(struct net_device *dev)
5899 {
5900         struct tg3 *tp = netdev_priv(dev);
5901
5902         netif_stop_queue(dev);
5903
5904         del_timer_sync(&tp->timer);
5905
5906         spin_lock_irq(&tp->lock);
5907         spin_lock(&tp->tx_lock);
5908 #if 0
5909         tg3_dump_state(tp);
5910 #endif
5911
5912         tg3_disable_ints(tp);
5913
5914         tg3_halt(tp);
5915         tg3_free_rings(tp);
5916         tp->tg3_flags &=
5917                 ~(TG3_FLAG_INIT_COMPLETE |
5918                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5919         netif_carrier_off(tp->dev);
5920
5921         spin_unlock(&tp->tx_lock);
5922         spin_unlock_irq(&tp->lock);
5923
5924         free_irq(dev->irq, dev);
5925
5926         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5927                sizeof(tp->net_stats_prev));
5928         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5929                sizeof(tp->estats_prev));
5930
5931         tg3_free_consistent(tp);
5932
5933         return 0;
5934 }
5935
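/* Fold a 64-bit hardware statistics counter into an unsigned long.  On
 * 32-bit hosts only the low 32 bits are returned; 64-bit hosts get the
 * full 64-bit value.
 */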
5936 static inline unsigned long get_stat64(tg3_stat64_t *val)
5937 {
5938         unsigned long ret;
5939
5940 #if (BITS_PER_LONG == 32)
5941         ret = val->low;
5942 #else
5943         ret = ((u64)val->high << 32) | ((u64)val->low);
5944 #endif
5945         return ret;
5946 }
5947
5948 static unsigned long calc_crc_errors(struct tg3 *tp)
5949 {
5950         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5951
5952         if (tp->phy_id != PHY_ID_SERDES &&
5953             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5954              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5955                 unsigned long flags;
5956                 u32 val;
5957
5958                 spin_lock_irqsave(&tp->lock, flags);
5959                 tg3_readphy(tp, 0x1e, &val);
5960                 tg3_writephy(tp, 0x1e, val | 0x8000);
5961                 tg3_readphy(tp, 0x14, &val);
5962                 spin_unlock_irqrestore(&tp->lock, flags);
5963
5964                 tp->phy_crc_errors += val;
5965
5966                 return tp->phy_crc_errors;
5967         }
5968
5969         return get_stat64(&hw_stats->rx_fcs_errors);
5970 }
5971
5972 #define ESTAT_ADD(member) \
5973         estats->member =        old_estats->member + \
5974                                 get_stat64(&hw_stats->member)
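/* Each ethtool statistic is the snapshot saved at the last tg3_close()
 * (estats_prev) plus whatever the hardware has accumulated since the
 * most recent chip reset.
 */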
5975
5976 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5977 {
5978         struct tg3_ethtool_stats *estats = &tp->estats;
5979         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5980         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5981
5982         if (!hw_stats)
5983                 return old_estats;
5984
5985         ESTAT_ADD(rx_octets);
5986         ESTAT_ADD(rx_fragments);
5987         ESTAT_ADD(rx_ucast_packets);
5988         ESTAT_ADD(rx_mcast_packets);
5989         ESTAT_ADD(rx_bcast_packets);
5990         ESTAT_ADD(rx_fcs_errors);
5991         ESTAT_ADD(rx_align_errors);
5992         ESTAT_ADD(rx_xon_pause_rcvd);
5993         ESTAT_ADD(rx_xoff_pause_rcvd);
5994         ESTAT_ADD(rx_mac_ctrl_rcvd);
5995         ESTAT_ADD(rx_xoff_entered);
5996         ESTAT_ADD(rx_frame_too_long_errors);
5997         ESTAT_ADD(rx_jabbers);
5998         ESTAT_ADD(rx_undersize_packets);
5999         ESTAT_ADD(rx_in_length_errors);
6000         ESTAT_ADD(rx_out_length_errors);
6001         ESTAT_ADD(rx_64_or_less_octet_packets);
6002         ESTAT_ADD(rx_65_to_127_octet_packets);
6003         ESTAT_ADD(rx_128_to_255_octet_packets);
6004         ESTAT_ADD(rx_256_to_511_octet_packets);
6005         ESTAT_ADD(rx_512_to_1023_octet_packets);
6006         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6007         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6008         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6009         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6010         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6011
6012         ESTAT_ADD(tx_octets);
6013         ESTAT_ADD(tx_collisions);
6014         ESTAT_ADD(tx_xon_sent);
6015         ESTAT_ADD(tx_xoff_sent);
6016         ESTAT_ADD(tx_flow_control);
6017         ESTAT_ADD(tx_mac_errors);
6018         ESTAT_ADD(tx_single_collisions);
6019         ESTAT_ADD(tx_mult_collisions);
6020         ESTAT_ADD(tx_deferred);
6021         ESTAT_ADD(tx_excessive_collisions);
6022         ESTAT_ADD(tx_late_collisions);
6023         ESTAT_ADD(tx_collide_2times);
6024         ESTAT_ADD(tx_collide_3times);
6025         ESTAT_ADD(tx_collide_4times);
6026         ESTAT_ADD(tx_collide_5times);
6027         ESTAT_ADD(tx_collide_6times);
6028         ESTAT_ADD(tx_collide_7times);
6029         ESTAT_ADD(tx_collide_8times);
6030         ESTAT_ADD(tx_collide_9times);
6031         ESTAT_ADD(tx_collide_10times);
6032         ESTAT_ADD(tx_collide_11times);
6033         ESTAT_ADD(tx_collide_12times);
6034         ESTAT_ADD(tx_collide_13times);
6035         ESTAT_ADD(tx_collide_14times);
6036         ESTAT_ADD(tx_collide_15times);
6037         ESTAT_ADD(tx_ucast_packets);
6038         ESTAT_ADD(tx_mcast_packets);
6039         ESTAT_ADD(tx_bcast_packets);
6040         ESTAT_ADD(tx_carrier_sense_errors);
6041         ESTAT_ADD(tx_discards);
6042         ESTAT_ADD(tx_errors);
6043
6044         ESTAT_ADD(dma_writeq_full);
6045         ESTAT_ADD(dma_write_prioq_full);
6046         ESTAT_ADD(rxbds_empty);
6047         ESTAT_ADD(rx_discards);
6048         ESTAT_ADD(rx_errors);
6049         ESTAT_ADD(rx_threshold_hit);
6050
6051         ESTAT_ADD(dma_readq_full);
6052         ESTAT_ADD(dma_read_prioq_full);
6053         ESTAT_ADD(tx_comp_queue_full);
6054
6055         ESTAT_ADD(ring_set_send_prod_index);
6056         ESTAT_ADD(ring_status_update);
6057         ESTAT_ADD(nic_irqs);
6058         ESTAT_ADD(nic_avoided_irqs);
6059         ESTAT_ADD(nic_tx_threshold_hit);
6060
6061         return estats;
6062 }
6063
6064 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6065 {
6066         struct tg3 *tp = netdev_priv(dev);
6067         struct net_device_stats *stats = &tp->net_stats;
6068         struct net_device_stats *old_stats = &tp->net_stats_prev;
6069         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6070
6071         if (!hw_stats)
6072                 return old_stats;
6073
6074         stats->rx_packets = old_stats->rx_packets +
6075                 get_stat64(&hw_stats->rx_ucast_packets) +
6076                 get_stat64(&hw_stats->rx_mcast_packets) +
6077                 get_stat64(&hw_stats->rx_bcast_packets);
6078
6079         stats->tx_packets = old_stats->tx_packets +
6080                 get_stat64(&hw_stats->tx_ucast_packets) +
6081                 get_stat64(&hw_stats->tx_mcast_packets) +
6082                 get_stat64(&hw_stats->tx_bcast_packets);
6083
6084         stats->rx_bytes = old_stats->rx_bytes +
6085                 get_stat64(&hw_stats->rx_octets);
6086         stats->tx_bytes = old_stats->tx_bytes +
6087                 get_stat64(&hw_stats->tx_octets);
6088
6089         stats->rx_errors = old_stats->rx_errors +
6090                 get_stat64(&hw_stats->rx_errors) +
6091                 get_stat64(&hw_stats->rx_discards);
6092         stats->tx_errors = old_stats->tx_errors +
6093                 get_stat64(&hw_stats->tx_errors) +
6094                 get_stat64(&hw_stats->tx_mac_errors) +
6095                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6096                 get_stat64(&hw_stats->tx_discards);
6097
6098         stats->multicast = old_stats->multicast +
6099                 get_stat64(&hw_stats->rx_mcast_packets);
6100         stats->collisions = old_stats->collisions +
6101                 get_stat64(&hw_stats->tx_collisions);
6102
6103         stats->rx_length_errors = old_stats->rx_length_errors +
6104                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6105                 get_stat64(&hw_stats->rx_undersize_packets);
6106
6107         stats->rx_over_errors = old_stats->rx_over_errors +
6108                 get_stat64(&hw_stats->rxbds_empty);
6109         stats->rx_frame_errors = old_stats->rx_frame_errors +
6110                 get_stat64(&hw_stats->rx_align_errors);
6111         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6112                 get_stat64(&hw_stats->tx_discards);
6113         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6114                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6115
6116         stats->rx_crc_errors = old_stats->rx_crc_errors +
6117                 calc_crc_errors(tp);
6118
6119         return stats;
6120 }
6121
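/* Compute the standard bit-reflected Ethernet CRC-32 (polynomial
 * 0xedb88320) over the buffer, one bit at a time.  It is used below to
 * hash multicast addresses into the MAC hash registers.
 */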
6122 static inline u32 calc_crc(unsigned char *buf, int len)
6123 {
6124         u32 reg;
6125         u32 tmp;
6126         int j, k;
6127
6128         reg = 0xffffffff;
6129
6130         for (j = 0; j < len; j++) {
6131                 reg ^= buf[j];
6132
6133                 for (k = 0; k < 8; k++) {
6134                         tmp = reg & 0x01;
6135
6136                         reg >>= 1;
6137
6138                         if (tmp) {
6139                                 reg ^= 0xedb88320;
6140                         }
6141                 }
6142         }
6143
6144         return ~reg;
6145 }
6146
6147 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6148 {
6149         /* accept or reject all multicast frames */
6150         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6151         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6152         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6153         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6154 }
6155
6156 static void __tg3_set_rx_mode(struct net_device *dev)
6157 {
6158         struct tg3 *tp = netdev_priv(dev);
6159         u32 rx_mode;
6160
6161         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6162                                   RX_MODE_KEEP_VLAN_TAG);
6163
6164         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6165          * flag clear.
6166          */
6167 #if TG3_VLAN_TAG_USED
6168         if (!tp->vlgrp &&
6169             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6170                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6171 #else
6172         /* By definition, VLAN is always disabled in this
6173          * case.
6174          */
6175         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6176                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6177 #endif
6178
6179         if (dev->flags & IFF_PROMISC) {
6180                 /* Promiscuous mode. */
6181                 rx_mode |= RX_MODE_PROMISC;
6182         } else if (dev->flags & IFF_ALLMULTI) {
6183                 /* Accept all multicast. */
6184                 tg3_set_multi(tp, 1);
6185         } else if (dev->mc_count < 1) {
6186                 /* Reject all multicast. */
6187                 tg3_set_multi(tp, 0);
6188         } else {
6189                 /* Accept one or more multicast(s). */
6190                 struct dev_mc_list *mclist;
6191                 unsigned int i;
6192                 u32 mc_filter[4] = { 0, };
6193                 u32 regidx;
6194                 u32 bit;
6195                 u32 crc;
6196
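                /* The low 7 bits of the inverted CRC of each multicast
                 * address select one of 128 hash-filter bits: bits 6:5
                 * pick one of the four MAC_HASH_REG_* registers and bits
                 * 4:0 pick the bit within that register.
                 */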
6197                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6198                      i++, mclist = mclist->next) {
6199
6200                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6201                         bit = ~crc & 0x7f;
6202                         regidx = (bit & 0x60) >> 5;
6203                         bit &= 0x1f;
6204                         mc_filter[regidx] |= (1 << bit);
6205                 }
6206
6207                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6208                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6209                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6210                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6211         }
6212
6213         if (rx_mode != tp->rx_mode) {
6214                 tp->rx_mode = rx_mode;
6215                 tw32_f(MAC_RX_MODE, rx_mode);
6216                 udelay(10);
6217         }
6218 }
6219
6220 static void tg3_set_rx_mode(struct net_device *dev)
6221 {
6222         struct tg3 *tp = netdev_priv(dev);
6223
6224         spin_lock_irq(&tp->lock);
6225         __tg3_set_rx_mode(dev);
6226         spin_unlock_irq(&tp->lock);
6227 }
6228
6229 #define TG3_REGDUMP_LEN         (32 * 1024)
6230
6231 static int tg3_get_regs_len(struct net_device *dev)
6232 {
6233         return TG3_REGDUMP_LEN;
6234 }
6235
6236 static void tg3_get_regs(struct net_device *dev,
6237                 struct ethtool_regs *regs, void *_p)
6238 {
6239         u32 *p = _p;
6240         struct tg3 *tp = netdev_priv(dev);
6241         u8 *orig_p = _p;
6242         int i;
6243
6244         regs->version = 0;
6245
6246         memset(p, 0, TG3_REGDUMP_LEN);
6247
6248         spin_lock_irq(&tp->lock);
6249         spin_lock(&tp->tx_lock);
6250
6251 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6252 #define GET_REG32_LOOP(base,len)                \
6253 do {    p = (u32 *)(orig_p + (base));           \
6254         for (i = 0; i < len; i += 4)            \
6255                 __GET_REG32((base) + i);        \
6256 } while (0)
6257 #define GET_REG32_1(reg)                        \
6258 do {    p = (u32 *)(orig_p + (reg));            \
6259         __GET_REG32((reg));                     \
6260 } while (0)
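/* Each register value lands at its own register offset inside the 32 kB
 * dump buffer (p is repositioned to orig_p + base for every block), so
 * offsets in the ethtool register dump match the hardware register
 * offsets; ranges that are not read stay zeroed by the memset above.
 */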
6261
6262         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6263         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6264         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6265         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6266         GET_REG32_1(SNDDATAC_MODE);
6267         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6268         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6269         GET_REG32_1(SNDBDC_MODE);
6270         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6271         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6272         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6273         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6274         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6275         GET_REG32_1(RCVDCC_MODE);
6276         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6277         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6278         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6279         GET_REG32_1(MBFREE_MODE);
6280         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6281         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6282         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6283         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6284         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6285         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6286         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6287         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6288         GET_REG32_LOOP(FTQ_RESET, 0x120);
6289         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6290         GET_REG32_1(DMAC_MODE);
6291         GET_REG32_LOOP(GRC_MODE, 0x4c);
6292         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6293                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6294
6295 #undef __GET_REG32
6296 #undef GET_REG32_LOOP
6297 #undef GET_REG32_1
6298
6299         spin_unlock(&tp->tx_lock);
6300         spin_unlock_irq(&tp->lock);
6301 }
6302
6303 static int tg3_get_eeprom_len(struct net_device *dev)
6304 {
6305         return EEPROM_CHIP_SIZE;
6306 }
6307
6308 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6309                                                  u32 offset, u32 *val);
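     /* ethtool EEPROM read.  The request is satisfied in up to three
      * steps via tg3_nvram_read_using_eeprom(): a partial word to reach
      * 4-byte alignment, whole 32-bit words for the bulk of the range,
      * and a final partial word for any trailing bytes.  eeprom->len
      * tracks how many bytes were actually copied out.
      */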
6310 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6311 {
6312         struct tg3 *tp = netdev_priv(dev);
6313         int ret;
6314         u8  *pd;
6315         u32 i, offset, len, val, b_offset, b_count;
6316
6317         offset = eeprom->offset;
6318         len = eeprom->len;
6319         eeprom->len = 0;
6320
6321         ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
6322         if (ret)
6323                 return ret;
6324         eeprom->magic = swab32(eeprom->magic);
6325
6326         if (offset & 3) {
6327                 /* adjustments to start on required 4 byte boundary */
6328                 b_offset = offset & 3;
6329                 b_count = 4 - b_offset;
6330                 if (b_count > len) {
6331                         /* i.e. offset=1 len=2 */
6332                         b_count = len;
6333                 }
6334                 ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
6335                 if (ret)
6336                         return ret;
6337                 memcpy(data, ((char*)&val) + b_offset, b_count);
6338                 len -= b_count;
6339                 offset += b_count;
6340                 eeprom->len += b_count;
6341         }
6342
6343         /* read bytes up to the last 4-byte boundary */
6344         pd = &data[eeprom->len];
6345         for (i = 0; i < (len - (len & 3)); i += 4) {
6346                 ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
6347                                 (u32*)(pd + i));
6348                 if (ret) {
6349                         eeprom->len += i;
6350                         return ret;
6351                 }
6352         }
6353         eeprom->len += i;
6354
6355         if (len & 3) {
6356                 /* read last bytes not ending on 4 byte boundary */
6357                 pd = &data[eeprom->len];
6358                 b_count = len & 3;
6359                 b_offset = offset + len - b_count;
6360                 ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
6361                 if (ret)
6362                         return ret;
6363                 memcpy(pd, ((char*)&val), b_count);
6364                 eeprom->len += b_count;
6365         }
6366         return 0;
6367 }
6368
6369 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6370 {
6371         struct tg3 *tp = netdev_priv(dev);
6372   
6373         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6374                                         tp->link_config.phy_is_low_power)
6375                 return -EAGAIN;
6376
6377         cmd->supported = (SUPPORTED_Autoneg);
6378
6379         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6380                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6381                                    SUPPORTED_1000baseT_Full);
6382
6383         if (tp->phy_id != PHY_ID_SERDES)
6384                 cmd->supported |= (SUPPORTED_100baseT_Half |
6385                                   SUPPORTED_100baseT_Full |
6386                                   SUPPORTED_10baseT_Half |
6387                                   SUPPORTED_10baseT_Full |
6388                                   SUPPORTED_MII);
6389         else
6390                 cmd->supported |= SUPPORTED_FIBRE;
6391   
6392         cmd->advertising = tp->link_config.advertising;
6393         cmd->speed = tp->link_config.active_speed;
6394         cmd->duplex = tp->link_config.active_duplex;
6395         cmd->port = 0;
6396         cmd->phy_address = PHY_ADDR;
6397         cmd->transceiver = 0;
6398         cmd->autoneg = tp->link_config.autoneg;
6399         cmd->maxtxpkt = 0;
6400         cmd->maxrxpkt = 0;
6401         return 0;
6402 }
6403   
6404 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6405 {
6406         struct tg3 *tp = netdev_priv(dev);
6407   
6408         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6409             tp->link_config.phy_is_low_power)
6410                 return -EAGAIN;
6411
6412         if (tp->phy_id == PHY_ID_SERDES) {
6413                 /* These are the only valid advertisement bits allowed.  */
6414                 if (cmd->autoneg == AUTONEG_ENABLE &&
6415                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6416                                           ADVERTISED_1000baseT_Full |
6417                                           ADVERTISED_Autoneg |
6418                                           ADVERTISED_FIBRE)))
6419                         return -EINVAL;
6420         }
6421
6422         spin_lock_irq(&tp->lock);
6423         spin_lock(&tp->tx_lock);
6424
6425         tp->link_config.autoneg = cmd->autoneg;
6426         if (cmd->autoneg == AUTONEG_ENABLE) {
6427                 tp->link_config.advertising = cmd->advertising;
6428                 tp->link_config.speed = SPEED_INVALID;
6429                 tp->link_config.duplex = DUPLEX_INVALID;
6430         } else {
6431                 tp->link_config.advertising = 0;
6432                 tp->link_config.speed = cmd->speed;
6433                 tp->link_config.duplex = cmd->duplex;
6434         }
6435   
6436         tg3_setup_phy(tp, 1);
6437         spin_unlock(&tp->tx_lock);
6438         spin_unlock_irq(&tp->lock);
6439   
6440         return 0;
6441 }
6442   
6443 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6444 {
6445         struct tg3 *tp = netdev_priv(dev);
6446   
6447         strcpy(info->driver, DRV_MODULE_NAME);
6448         strcpy(info->version, DRV_MODULE_VERSION);
6449         strcpy(info->bus_info, pci_name(tp->pdev));
6450 }
6451   
6452 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6453 {
6454         struct tg3 *tp = netdev_priv(dev);
6455   
6456         wol->supported = WAKE_MAGIC;
6457         wol->wolopts = 0;
6458         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6459                 wol->wolopts = WAKE_MAGIC;
6460         memset(&wol->sopass, 0, sizeof(wol->sopass));
6461 }
6462   
6463 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6464 {
6465         struct tg3 *tp = netdev_priv(dev);
6466   
6467         if (wol->wolopts & ~WAKE_MAGIC)
6468                 return -EINVAL;
6469         if ((wol->wolopts & WAKE_MAGIC) &&
6470             tp->phy_id == PHY_ID_SERDES &&
6471             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6472                 return -EINVAL;
6473   
6474         spin_lock_irq(&tp->lock);
6475         if (wol->wolopts & WAKE_MAGIC)
6476                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6477         else
6478                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6479         spin_unlock_irq(&tp->lock);
6480   
6481         return 0;
6482 }
6483   
6484 static u32 tg3_get_msglevel(struct net_device *dev)
6485 {
6486         struct tg3 *tp = netdev_priv(dev);
6487         return tp->msg_enable;
6488 }
6489   
6490 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6491 {
6492         struct tg3 *tp = netdev_priv(dev);
6493         tp->msg_enable = value;
6494 }
6495   
6496 #if TG3_TSO_SUPPORT != 0
6497 static int tg3_set_tso(struct net_device *dev, u32 value)
6498 {
6499         struct tg3 *tp = netdev_priv(dev);
6500
6501         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6502                 if (value)
6503                         return -EINVAL;
6504                 return 0;
6505         }
6506         return ethtool_op_set_tso(dev, value);
6507 }
6508 #endif
6509   
6510 static int tg3_nway_reset(struct net_device *dev)
6511 {
6512         struct tg3 *tp = netdev_priv(dev);
6513         u32 bmcr;
6514         int r;
6515   
6516         spin_lock_irq(&tp->lock);
6517         tg3_readphy(tp, MII_BMCR, &bmcr);
6518         tg3_readphy(tp, MII_BMCR, &bmcr);
6519         r = -EINVAL;
6520         if (bmcr & BMCR_ANENABLE) {
6521                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6522                 r = 0;
6523         }
6524         spin_unlock_irq(&tp->lock);
6525   
6526         return r;
6527 }
6528   
6529 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6530 {
6531         struct tg3 *tp = netdev_priv(dev);
6532   
6533         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6534         ering->rx_mini_max_pending = 0;
6535         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6536
6537         ering->rx_pending = tp->rx_pending;
6538         ering->rx_mini_pending = 0;
6539         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6540         ering->tx_pending = tp->tx_pending;
6541 }
6542   
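     /* Changing ring sizes requires a full restart: the interface is
      * quiesced, the new pending counts are recorded (rx_pending is
      * capped at 63 on chips flagged TG3_FLG2_MAX_RXPEND_64), and the
      * chip is halted and re-initialized with the new values.
      */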
6543 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6544 {
6545         struct tg3 *tp = netdev_priv(dev);
6546   
6547         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6548             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6549             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6550                 return -EINVAL;
6551   
6552         tg3_netif_stop(tp);
6553         spin_lock_irq(&tp->lock);
6554         spin_lock(&tp->tx_lock);
6555   
6556         tp->rx_pending = ering->rx_pending;
6557
6558         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6559             tp->rx_pending > 63)
6560                 tp->rx_pending = 63;
6561         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6562         tp->tx_pending = ering->tx_pending;
6563
6564         tg3_halt(tp);
6565         tg3_init_hw(tp);
6566         netif_wake_queue(tp->dev);
6567         spin_unlock(&tp->tx_lock);
6568         spin_unlock_irq(&tp->lock);
6569         tg3_netif_start(tp);
6570   
6571         return 0;
6572 }
6573   
6574 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6575 {
6576         struct tg3 *tp = netdev_priv(dev);
6577   
6578         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6579         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6580         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
6581 }
6582   
6583 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6584 {
6585         struct tg3 *tp = netdev_priv(dev);
6586   
6587         tg3_netif_stop(tp);
6588         spin_lock_irq(&tp->lock);
6589         spin_lock(&tp->tx_lock);
6590         if (epause->autoneg)
6591                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6592         else
6593                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6594         if (epause->rx_pause)
6595                 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6596         else
6597                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6598         if (epause->tx_pause)
6599                 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6600         else
6601                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6602         tg3_halt(tp);
6603         tg3_init_hw(tp);
6604         spin_unlock(&tp->tx_lock);
6605         spin_unlock_irq(&tp->lock);
6606         tg3_netif_start(tp);
6607   
6608         return 0;
6609 }
6610   
6611 static u32 tg3_get_rx_csum(struct net_device *dev)
6612 {
6613         struct tg3 *tp = netdev_priv(dev);
6614         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6615 }
6616   
6617 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6618 {
6619         struct tg3 *tp = netdev_priv(dev);
6620   
6621         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6622                 if (data != 0)
6623                         return -EINVAL;
6624                 return 0;
6625         }
6626   
6627         spin_lock_irq(&tp->lock);
6628         if (data)
6629                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6630         else
6631                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6632         spin_unlock_irq(&tp->lock);
6633   
6634         return 0;
6635 }
6636   
6637 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6638 {
6639         struct tg3 *tp = netdev_priv(dev);
6640   
6641         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6642                 if (data != 0)
6643                         return -EINVAL;
6644                 return 0;
6645         }
6646   
6647         if (data)
6648                 dev->features |= NETIF_F_IP_CSUM;
6649         else
6650                 dev->features &= ~NETIF_F_IP_CSUM;
6651
6652         return 0;
6653 }
6654
6655 static int tg3_get_stats_count (struct net_device *dev)
6656 {
6657         return TG3_NUM_STATS;
6658 }
6659
6660 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6661 {
6662         switch (stringset) {
6663         case ETH_SS_STATS:
6664                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6665                 break;
6666         default:
6667                 WARN_ON(1);     /* we need a WARN() */
6668                 break;
6669         }
6670 }
6671
6672 static void tg3_get_ethtool_stats (struct net_device *dev,
6673                                    struct ethtool_stats *estats, u64 *tmp_stats)
6674 {
6675         struct tg3 *tp = netdev_priv(dev);
6676         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6677 }
6678
6679 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6680 {
6681         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
6682         struct tg3 *tp = netdev_priv(dev);
6683         int err;
6684
6685         switch(cmd) {
6686         case SIOCGMIIPHY:
6687                 data->phy_id = PHY_ADDR;
6688
6689                 /* fallthru */
6690         case SIOCGMIIREG: {
6691                 u32 mii_regval;
6692
6693                 spin_lock_irq(&tp->lock);
6694                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6695                 spin_unlock_irq(&tp->lock);
6696
6697                 data->val_out = mii_regval;
6698
6699                 return err;
6700         }
6701
6702         case SIOCSMIIREG:
6703                 if (!capable(CAP_NET_ADMIN))
6704                         return -EPERM;
6705
6706                 spin_lock_irq(&tp->lock);
6707                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6708                 spin_unlock_irq(&tp->lock);
6709
6710                 return err;
6711
6712         default:
6713                 /* do nothing */
6714                 break;
6715         }
6716         return -EOPNOTSUPP;
6717 }
6718
6719 #if TG3_VLAN_TAG_USED
6720 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6721 {
6722         struct tg3 *tp = netdev_priv(dev);
6723
6724         spin_lock_irq(&tp->lock);
6725         spin_lock(&tp->tx_lock);
6726
6727         tp->vlgrp = grp;
6728
6729         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6730         __tg3_set_rx_mode(dev);
6731
6732         spin_unlock(&tp->tx_lock);
6733         spin_unlock_irq(&tp->lock);
6734 }
6735
6736 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6737 {
6738         struct tg3 *tp = netdev_priv(dev);
6739
6740         spin_lock_irq(&tp->lock);
6741         spin_lock(&tp->tx_lock);
6742         if (tp->vlgrp)
6743                 tp->vlgrp->vlan_devices[vid] = NULL;
6744         spin_unlock(&tp->tx_lock);
6745         spin_unlock_irq(&tp->lock);
6746 }
6747 #endif
6748
6749 static struct ethtool_ops tg3_ethtool_ops = {
6750         .get_settings           = tg3_get_settings,
6751         .set_settings           = tg3_set_settings,
6752         .get_drvinfo            = tg3_get_drvinfo,
6753         .get_regs_len           = tg3_get_regs_len,
6754         .get_regs               = tg3_get_regs,
6755         .get_wol                = tg3_get_wol,
6756         .set_wol                = tg3_set_wol,
6757         .get_msglevel           = tg3_get_msglevel,
6758         .set_msglevel           = tg3_set_msglevel,
6759         .nway_reset             = tg3_nway_reset,
6760         .get_link               = ethtool_op_get_link,
6761         .get_eeprom_len         = tg3_get_eeprom_len,
6762         .get_eeprom             = tg3_get_eeprom,
6763         .get_ringparam          = tg3_get_ringparam,
6764         .set_ringparam          = tg3_set_ringparam,
6765         .get_pauseparam         = tg3_get_pauseparam,
6766         .set_pauseparam         = tg3_set_pauseparam,
6767         .get_rx_csum            = tg3_get_rx_csum,
6768         .set_rx_csum            = tg3_set_rx_csum,
6769         .get_tx_csum            = ethtool_op_get_tx_csum,
6770         .set_tx_csum            = tg3_set_tx_csum,
6771         .get_sg                 = ethtool_op_get_sg,
6772         .set_sg                 = ethtool_op_set_sg,
6773 #if TG3_TSO_SUPPORT != 0
6774         .get_tso                = ethtool_op_get_tso,
6775         .set_tso                = tg3_set_tso,
6776 #endif
6777         .get_strings            = tg3_get_strings,
6778         .get_stats_count        = tg3_get_stats_count,
6779         .get_ethtool_stats      = tg3_get_ethtool_stats,
6780 };
6781
6782 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6783 static void __devinit tg3_nvram_init(struct tg3 *tp)
6784 {
6785         int j;
6786
6787         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6788                 return;
6789
6790         tw32_f(GRC_EEPROM_ADDR,
6791              (EEPROM_ADDR_FSM_RESET |
6792               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6793                EEPROM_ADDR_CLKPERD_SHIFT)));
6794
6795         /* XXX schedule_timeout() ... */
6796         for (j = 0; j < 100; j++)
6797                 udelay(10);
6798
6799         /* Enable seeprom accesses. */
6800         tw32_f(GRC_LOCAL_CTRL,
6801              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6802         udelay(100);
6803
6804         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6805             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6806                 u32 nvcfg1;
6807
6808                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6809                         u32 nvaccess = tr32(NVRAM_ACCESS);
6810
6811                         tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6812                 }
6813
6814                 nvcfg1 = tr32(NVRAM_CFG1);
6815
6816                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6817                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6818                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6819                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6820                 } else {
6821                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6822                         tw32(NVRAM_CFG1, nvcfg1);
6823                 }
6824
6825                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6826                         u32 nvaccess = tr32(NVRAM_ACCESS);
6827
6828                         tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6829                 }
6830         } else {
6831                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6832         }
6833 }
6834
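     /* Read one 32-bit word through the legacy serial EEPROM interface:
      * program the (4-byte aligned) offset plus the READ/START bits into
      * GRC_EEPROM_ADDR, poll for EEPROM_ADDR_COMPLETE for up to roughly
      * a second, then fetch the word from GRC_EEPROM_DATA.
      */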
6835 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6836                                                  u32 offset, u32 *val)
6837 {
6838         u32 tmp;
6839         int i;
6840
6841         if (offset > EEPROM_ADDR_ADDR_MASK ||
6842             (offset % 4) != 0)
6843                 return -EINVAL;
6844
6845         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6846                                         EEPROM_ADDR_DEVID_MASK |
6847                                         EEPROM_ADDR_READ);
6848         tw32(GRC_EEPROM_ADDR,
6849              tmp |
6850              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6851              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6852               EEPROM_ADDR_ADDR_MASK) |
6853              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6854
6855         for (i = 0; i < 10000; i++) {
6856                 tmp = tr32(GRC_EEPROM_ADDR);
6857
6858                 if (tmp & EEPROM_ADDR_COMPLETE)
6859                         break;
6860                 udelay(100);
6861         }
6862         if (!(tmp & EEPROM_ADDR_COMPLETE))
6863                 return -EBUSY;
6864
6865         *val = tr32(GRC_EEPROM_DATA);
6866         return 0;
6867 }
6868
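     /* General NVRAM word read.  Sun 5704 boards have no usable NVRAM,
      * chips without TG3_FLAG_NVRAM fall back to the EEPROM interface
      * above, and buffered-flash parts need the linear offset remapped
      * into page/offset form.  The read itself goes through NVRAM_CMD
      * with a ~10ms poll for NVRAM_CMD_DONE, bracketed on 5750 by
      * enabling and then disabling NVRAM_ACCESS.
      */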
6869 static int __devinit tg3_nvram_read(struct tg3 *tp,
6870                                     u32 offset, u32 *val)
6871 {
6872         int i;
6873
6874         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6875                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6876                 return -EINVAL;
6877         }
6878
6879         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6880                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6881
6882         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6883                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6884                           NVRAM_BUFFERED_PAGE_POS) +
6885                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6886
6887         if (offset > NVRAM_ADDR_MSK)
6888                 return -EINVAL;
6889
6890         tg3_nvram_lock(tp);
6891
6892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6893                 u32 nvaccess = tr32(NVRAM_ACCESS);
6894
6895                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
6896         }
6897
6898         tw32(NVRAM_ADDR, offset);
6899         tw32(NVRAM_CMD,
6900              NVRAM_CMD_RD | NVRAM_CMD_GO |
6901              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6902
6903         /* Wait for done bit to clear. */
6904         for (i = 0; i < 1000; i++) {
6905                 udelay(10);
6906                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
6907                         udelay(10);
6908                         *val = swab32(tr32(NVRAM_RDDATA));
6909                         break;
6910                 }
6911         }
6912
6913         tg3_nvram_unlock(tp);
6914
6915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6916                 u32 nvaccess = tr32(NVRAM_ACCESS);
6917
6918                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
6919         }
6920
6921         if (i >= 1000)
6922                 return -EBUSY;
6923
6924         return 0;
6925 }
6926
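     /* Known board list: maps PCI subsystem vendor/device IDs to the PHY
      * fitted on that board.  Used by tg3_phy_probe() as a first guess
      * and as the fallback when the PHY ID cannot be read directly.
      */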
6927 struct subsys_tbl_ent {
6928         u16 subsys_vendor, subsys_devid;
6929         u32 phy_id;
6930 };
6931
6932 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6933         /* Broadcom boards. */
6934         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6935         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6936         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6937         { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
6938         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6939         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6940         { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
6941         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6942         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6943         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6944         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6945
6946         /* 3com boards. */
6947         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6948         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6949         { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
6950         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6951         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6952
6953         /* DELL boards. */
6954         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6955         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6956         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6957         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6958
6959         /* Compaq boards. */
6960         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6961         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6962         { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
6963         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6964         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6965
6966         /* IBM boards. */
6967         { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
6968 };
6969
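     /* Identify the PHY.  The probe consults, in order: the subsystem-ID
      * table above, the firmware config block in NIC SRAM (which also
      * yields LED mode, ASF and WOL settings), and finally the live
      * MII_PHYSID registers -- unless ASF firmware is active, in which
      * case touching the PHY is avoided.  Copper PHYs that are not
      * already linked get a reset and a default advertisement.
      */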
6970 static int __devinit tg3_phy_probe(struct tg3 *tp)
6971 {
6972         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6973         u32 hw_phy_id, hw_phy_id_masked;
6974         u32 val;
6975         int i, eeprom_signature_found, err;
6976
6977         tp->phy_id = PHY_ID_INVALID;
6978         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6979                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6980                      tp->pdev->subsystem_vendor) &&
6981                     (subsys_id_to_phy_id[i].subsys_devid ==
6982                      tp->pdev->subsystem_device)) {
6983                         tp->phy_id = subsys_id_to_phy_id[i].phy_id;
6984                         break;
6985                 }
6986         }
6987
6988         eeprom_phy_id = PHY_ID_INVALID;
6989         eeprom_signature_found = 0;
6990         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6991         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6992                 u32 nic_cfg, led_cfg;
6993
6994                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6995                 tp->nic_sram_data_cfg = nic_cfg;
6996
6997                 eeprom_signature_found = 1;
6998
6999                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7000                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
7001                         eeprom_phy_id = PHY_ID_SERDES;
7002                 } else {
7003                         u32 nic_phy_id;
7004
7005                         tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7006                         if (nic_phy_id != 0) {
7007                                 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7008                                 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7009
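                                     /* Pack the two halves into the same
                                      * 32-bit layout produced from the
                                      * MII_PHYSID registers below, so both
                                      * forms can be masked with PHY_ID_MASK.
                                      */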
7010                                 eeprom_phy_id  = (id1 >> 16) << 10;
7011                                 eeprom_phy_id |= (id2 & 0xfc00) << 16;
7012                                 eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7013                         }
7014                 }
7015
7016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7017                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
7018                         led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7019                                     SHASTA_EXT_LED_MODE_MASK);
7020                 } else
7021                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7022
7023                 switch (led_cfg) {
7024                 default:
7025                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7026                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7027                         break;
7028
7029                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7030                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7031                         break;
7032
7033                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7034                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7035                         break;
7036
7037                 case SHASTA_EXT_LED_SHARED:
7038                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7039                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7040                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7041                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7042                                                  LED_CTRL_MODE_PHY_2);
7043                         break;
7044
7045                 case SHASTA_EXT_LED_MAC:
7046                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7047                         break;
7048
7049                 case SHASTA_EXT_LED_COMBO:
7050                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7051                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7052                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7053                                                  LED_CTRL_MODE_PHY_2);
7054                         break;
7055
7056                 }
7057
7058                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7059                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7060                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7061                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7062
7063                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
7064                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
7065                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
7066                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7067                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7068
7069                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7070                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7071                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7072                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7073                 }
7074                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7075                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7076         }
7077
7078         /* Reading the PHY ID register can conflict with ASF
7079          * firmware access to the PHY hardware.
7080          */
7081         err = 0;
7082         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7083                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7084         } else {
7085                 /* Now read the physical PHY_ID from the chip and verify
7086                  * that it is sane.  If it doesn't look good, we fall back
7087                  * to the hard-coded, table-based PHY_ID and, failing
7088                  * that, to the value found in the eeprom area.
7089                  */
7090                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7091                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7092
7093                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7094                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7095                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7096
7097                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7098         }
7099
7100         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7101                 tp->phy_id = hw_phy_id;
7102         } else {
7103                 /* phy_id currently holds the value found in the
7104                  * subsys_id_to_phy_id[] table or PHY_ID_INVALID
7105                  * if a match was not found there.
7106                  */
7107                 if (tp->phy_id == PHY_ID_INVALID) {
7108                         if (!eeprom_signature_found ||
7109                             !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
7110                                 return -ENODEV;
7111                         tp->phy_id = eeprom_phy_id;
7112                 }
7113         }
7114
7115         if (tp->phy_id != PHY_ID_SERDES &&
7116             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7117                 u32 bmsr, adv_reg, tg3_ctrl;
7118
7119                 tg3_readphy(tp, MII_BMSR, &bmsr);
7120                 tg3_readphy(tp, MII_BMSR, &bmsr);
7121
7122                 if (bmsr & BMSR_LSTATUS)
7123                         goto skip_phy_reset;
7124                     
7125                 err = tg3_phy_reset(tp);
7126                 if (err)
7127                         return err;
7128
7129                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7130                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7131                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7132                 tg3_ctrl = 0;
7133                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7134                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7135                                     MII_TG3_CTRL_ADV_1000_FULL);
7136                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7137                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7138                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7139                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7140                 }
7141
7142                 if (!tg3_copper_is_advertising_all(tp)) {
7143                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7144
7145                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7146                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7147
7148                         tg3_writephy(tp, MII_BMCR,
7149                                      BMCR_ANENABLE | BMCR_ANRESTART);
7150                 }
7151                 tg3_phy_set_wirespeed(tp);
7152
7153                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7154                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7155                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7156         }
7157
7158 skip_phy_reset:
7159         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7160                 err = tg3_init_5401phy_dsp(tp);
7161                 if (err)
7162                         return err;
7163         }
7164
7165         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7166                 err = tg3_init_5401phy_dsp(tp);
7167         }
7168
7169         if (!eeprom_signature_found)
7170                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7171
7172         if (tp->phy_id == PHY_ID_SERDES)
7173                 tp->link_config.advertising =
7174                         (ADVERTISED_1000baseT_Half |
7175                          ADVERTISED_1000baseT_Full |
7176                          ADVERTISED_Autoneg |
7177                          ADVERTISED_FIBRE);
7178         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7179                 tp->link_config.advertising &=
7180                         ~(ADVERTISED_1000baseT_Half |
7181                           ADVERTISED_1000baseT_Full);
7182
7183         return err;
7184 }
7185
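     /* Fetch the board part number from the PCI VPD image kept in NVRAM
      * at offset 0x100: skip the 0x82 identifier-string and 0x91
      * read-write resources, then search the 0x90 read-only resource
      * for the "PN" keyword.  Falls back to "none" if nothing is found.
      */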
7186 static void __devinit tg3_read_partno(struct tg3 *tp)
7187 {
7188         unsigned char vpd_data[256];
7189         int i;
7190
7191         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
7192                 /* Sun decided not to put the necessary bits in the
7193                  * NVRAM of their onboard tg3 parts :(
7194                  */
7195                 strcpy(tp->board_part_number, "Sun 5704");
7196                 return;
7197         }
7198
7199         for (i = 0; i < 256; i += 4) {
7200                 u32 tmp;
7201
7202                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7203                         goto out_not_found;
7204
7205                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7206                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7207                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7208                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7209         }
7210
7211         /* Now parse and find the part number. */
7212         for (i = 0; i < 256; ) {
7213                 unsigned char val = vpd_data[i];
7214                 int block_end;
7215
7216                 if (val == 0x82 || val == 0x91) {
7217                         i = (i + 3 +
7218                              (vpd_data[i + 1] +
7219                               (vpd_data[i + 2] << 8)));
7220                         continue;
7221                 }
7222
7223                 if (val != 0x90)
7224                         goto out_not_found;
7225
7226                 block_end = (i + 3 +
7227                              (vpd_data[i + 1] +
7228                               (vpd_data[i + 2] << 8)));
7229                 i += 3;
7230                 while (i < block_end) {
7231                         if (vpd_data[i + 0] == 'P' &&
7232                             vpd_data[i + 1] == 'N') {
7233                                 int partno_len = vpd_data[i + 2];
7234
7235                                 if (partno_len > 24)
7236                                         goto out_not_found;
7237
7238                                 memcpy(tp->board_part_number,
7239                                        &vpd_data[i + 3],
7240                                        partno_len);
7241
7242                                 /* Success. */
7243                                 return;
7244                         }
                             /* Not the "PN" keyword: skip over it
                              * (2-byte keyword, 1-byte length, then
                              * the data) so this scan cannot loop
                              * forever on an unmatched entry.
                              */
                             i += 3 + vpd_data[i + 2];
7245                 }
7246
7247                 /* Part number not found. */
7248                 goto out_not_found;
7249         }
7250
7251 out_not_found:
7252         strcpy(tp->board_part_number, "none");
7253 }
7254
7255 #ifdef CONFIG_SPARC64
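     /* Detect Sun's onboard 5704 by reading the subsystem IDs from the
      * OpenFirmware device tree; those parts ship without the usual
      * NVRAM contents and need special-casing elsewhere in the driver.
      */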
7256 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
7257 {
7258         struct pci_dev *pdev = tp->pdev;
7259         struct pcidev_cookie *pcp = pdev->sysdata;
7260
7261         if (pcp != NULL) {
7262                 int node = pcp->prom_node;
7263                 u32 venid, devid;
7264                 int err;
7265
7266                 err = prom_getproperty(node, "subsystem-vendor-id",
7267                                        (char *) &venid, sizeof(venid));
7268                 if (err == 0 || err == -1)
7269                         return 0;
7270                 err = prom_getproperty(node, "subsystem-id",
7271                                        (char *) &devid, sizeof(devid));
7272                 if (err == 0 || err == -1)
7273                         return 0;
7274
7275                 if (venid == PCI_VENDOR_ID_SUN &&
7276                     devid == PCI_DEVICE_ID_TIGON3_5704)
7277                         return 1;
7278         }
7279         return 0;
7280 }
7281 #endif
7282
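     /* One-time probe of PCI config space, chip revision and board
      * straps.  The results are translated into TG3_FLAG_ and TG3_FLG2_
      * workaround bits, the transmit routine is chosen, and the initial
      * NVRAM and PHY probes are kicked off.
      */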
7283 static int __devinit tg3_get_invariants(struct tg3 *tp)
7284 {
7285         u32 misc_ctrl_reg;
7286         u32 cacheline_sz_reg;
7287         u32 pci_state_reg, grc_misc_cfg;
7288         u32 val;
7289         u16 pci_cmd;
7290         int err;
7291
7292 #ifdef CONFIG_SPARC64
7293         if (tg3_is_sun_5704(tp))
7294                 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
7295 #endif
7296
7297         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7298          * reordering to the mailbox registers done by the host
7299          * controller can cause major troubles.  We read back from
7300          * every mailbox register write to force the writes to be
7301          * posted to the chip in order.
7302          */
7303         if (pci_find_device(PCI_VENDOR_ID_INTEL,
7304                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
7305             pci_find_device(PCI_VENDOR_ID_INTEL,
7306                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
7307             pci_find_device(PCI_VENDOR_ID_INTEL,
7308                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
7309             pci_find_device(PCI_VENDOR_ID_INTEL,
7310                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
7311             pci_find_device(PCI_VENDOR_ID_AMD,
7312                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
7313                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7314
7315         /* Force memory write invalidate off.  If we leave it on,
7316          * then on 5700_BX chips we have to enable a workaround.
7317          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7318          * to match the cacheline size.  The Broadcom driver has this
7319          * workaround but turns MWI off all the time, so it never uses
7320          * it.  This seems to suggest that the workaround is insufficient.
7321          */
7322         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7323         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7324         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7325
7326         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7327          * has the register indirect write enable bit set before
7328          * we try to access any of the MMIO registers.  It is also
7329          * critical that the PCI-X hw workaround situation is decided
7330          * before that as well.
7331          */
7332         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7333                               &misc_ctrl_reg);
7334
7335         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7336                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7337
7338         /* Initialize misc host control in PCI block. */
7339         tp->misc_host_ctrl |= (misc_ctrl_reg &
7340                                MISC_HOST_CTRL_CHIPREV);
7341         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7342                                tp->misc_host_ctrl);
7343
7344         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7345                               &cacheline_sz_reg);
7346
7347         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7348         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7349         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7350         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7351
7352         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7353                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7354
7355         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7356             tp->pci_lat_timer < 64) {
7357                 tp->pci_lat_timer = 64;
7358
7359                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7360                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7361                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7362                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7363
7364                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7365                                        cacheline_sz_reg);
7366         }
7367
7368         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7369                               &pci_state_reg);
7370
7371         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7372                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7373
7374                 /* If this is a 5700 BX chipset, and we are in PCI-X
7375                  * mode, enable register write workaround.
7376                  *
7377                  * The workaround is to use indirect register accesses
7378                  * for all chip writes not to mailbox registers.
7379                  */
7380                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7381                         u32 pm_reg;
7382                         u16 pci_cmd;
7383
7384                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7385
7386                         /* The chip can have its power management PCI config
7387                          * space registers clobbered due to this bug.
7388                          * So explicitly force the chip into D0 here.
7389                          */
7390                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7391                                               &pm_reg);
7392                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7393                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7394                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7395                                                pm_reg);
7396
7397                         /* Also, force SERR#/PERR# in PCI command. */
7398                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7399                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7400                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7401                 }
7402         }
7403
7404         /* Back to back register writes can cause problems on this chip,
7405          * the workaround is to read back all reg writes except those to
7406          * mailbox regs.  See tg3_write_indirect_reg32().
7407          *
7408          * PCI Express 5750_A0 rev chips need this workaround too.
7409          */
7410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7411             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7412              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
7413                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
7414
7415         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
7416                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
7417         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
7418                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
7419
7420         /* Chip-specific fixup from Broadcom driver */
7421         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
7422             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
7423                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
7424                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
7425         }
7426
7427         /* Force the chip into D0. */
7428         err = tg3_set_power_state(tp, 0);
7429         if (err) {
7430                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
7431                        pci_name(tp->pdev));
7432                 return err;
7433         }
7434
7435         /* 5700 B0 chips do not support checksumming correctly due
7436          * to hardware bugs.
7437          */
7438         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7439                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7440
7441         /* Pseudo-header checksum is done by hardware logic and not
7442          * the offload processors, so make the chip do the pseudo-
7443          * header checksums on receive.  For transmit it is more
7444          * convenient to do the pseudo-header checksum in software
7445          * as Linux does that on transmit for us in all cases.
7446          */
7447         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7448         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7449
7450         /* Derive initial jumbo mode from MTU assigned in
7451          * ether_setup() via the alloc_etherdev() call
7452          */
7453         if (tp->dev->mtu > ETH_DATA_LEN)
7454                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7455
7456         /* Determine WakeOnLan speed to use. */
7457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7458             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7459             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7460             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7461                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7462         } else {
7463                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7464         }
7465
7466         /* A few boards don't want Ethernet@WireSpeed phy feature */
7467         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7468             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7469              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7470              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7471                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7472
7473         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7474             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7475                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7476         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7477                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7478
7479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7480             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7481                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7482
7483         /* Only 5701 and later support tagged irq status mode.
7484          * Also, 5788 chips cannot use tagged irq status.
7485          *
7486          * However, since we are using NAPI, we avoid tagged irq status
7487          * because the interrupt condition is more difficult to
7488          * fully clear in that mode.
7489          */
7490         tp->coalesce_mode = 0;
7491
7492         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7493             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7494                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7495
7496         /* Initialize MAC MI mode, polling disabled. */
7497         tw32_f(MAC_MI_MODE, tp->mi_mode);
7498         udelay(80);
7499
7500         /* Initialize data/descriptor byte/word swapping. */
7501         val = tr32(GRC_MODE);
7502         val &= GRC_MODE_HOST_STACKUP;
7503         tw32(GRC_MODE, val | tp->grc_mode);
7504
7505         tg3_switch_clocks(tp);
7506
7507         /* Clear this out for sanity. */
7508         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7509
7510         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7511                               &pci_state_reg);
7512         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7513             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7514                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7515
7516                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7517                     chiprevid == CHIPREV_ID_5701_B0 ||
7518                     chiprevid == CHIPREV_ID_5701_B2 ||
7519                     chiprevid == CHIPREV_ID_5701_B5) {
7520                         unsigned long sram_base;
7521
7522                         /* Write some dummy words into the SRAM status block
7523                          * area, see if it reads back correctly.  If the return
7524                          * value is bad, force enable the PCIX workaround.
7525                          */
7526                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7527
7528                         writel(0x00000000, sram_base);
7529                         writel(0x00000000, sram_base + 4);
7530                         writel(0xffffffff, sram_base + 4);
7531                         if (readl(sram_base) != 0x00000000)
7532                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7533                 }
7534         }
7535
7536         udelay(50);
7537         tg3_nvram_init(tp);
7538
7539         /* Determine if TX descriptors will reside in
7540          * main memory or in the chip SRAM.
7541          */
7542         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
7543             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7545                 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7546
7547         grc_misc_cfg = tr32(GRC_MISC_CFG);
7548         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7549
7550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7551             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7552                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7553                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7554         }
7555
7556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7557             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7558              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7559                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7560
7561         /* these are limited to 10/100 only */
7562         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7563              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7564             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7565              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7566              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7567               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7568               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)))
7569                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7570
7571         err = tg3_phy_probe(tp);
7572         if (err) {
7573                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7574                        pci_name(tp->pdev), err);
7575                 /* ... but do not return immediately ... */
7576         }
7577
7578         tg3_read_partno(tp);
7579
7580         if (tp->phy_id == PHY_ID_SERDES) {
7581                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7582         } else {
7583                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7584                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7585                 else
7586                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7587         }
7588
7589         /* 5700 {AX,BX} chips have a broken status block link
7590          * change bit implementation, so we must use the
7591          * status register in those cases.
7592          */
7593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7594                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7595         else
7596                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7597
7598         /* The led_ctrl is set during tg3_phy_probe; here we might
7599          * have to force the link status polling mechanism based
7600          * upon subsystem IDs.
7601          */
7602         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7603             tp->phy_id != PHY_ID_SERDES) {
7604                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7605                                   TG3_FLAG_USE_LINKCHG_REG);
7606         }
7607
7608         /* For all SERDES we poll the MAC status register. */
7609         if (tp->phy_id == PHY_ID_SERDES)
7610                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7611         else
7612                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7613
7614         /* 5700 BX chips need to have their TX producer index mailboxes
7615          * written twice to workaround a bug.
7616          */
7617         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7618                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7619         else
7620                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7621
7622         /* 5700 chips can get confused if TX buffers straddle the
7623          * 4GB address boundary in some cases.
7624          */
7625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7626                 tp->dev->hard_start_xmit = tg3_start_xmit_4gbug;
7627         else
7628                 tp->dev->hard_start_xmit = tg3_start_xmit;
7629
7630         tp->rx_offset = 2;
7631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7632             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7633                 tp->rx_offset = 0;
7634
7635         /* By default, disable wake-on-lan.  User can change this
7636          * using ETHTOOL_SWOL.
7637          */
7638         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7639
7640         return err;
7641 }
7642
7643 #ifdef CONFIG_SPARC64
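     /* On sparc64, prefer the per-device "local-mac-address" property
      * from the OpenFirmware device tree; the second helper below falls
      * back to the system-wide IDPROM address.
      */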
7644 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7645 {
7646         struct net_device *dev = tp->dev;
7647         struct pci_dev *pdev = tp->pdev;
7648         struct pcidev_cookie *pcp = pdev->sysdata;
7649
7650         if (pcp != NULL) {
7651                 int node = pcp->prom_node;
7652
7653                 if (prom_getproplen(node, "local-mac-address") == 6) {
7654                         prom_getproperty(node, "local-mac-address",
7655                                          dev->dev_addr, 6);
7656                         return 0;
7657                 }
7658         }
7659         return -ENODEV;
7660 }
7661
7662 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7663 {
7664         struct net_device *dev = tp->dev;
7665
7666         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7667         return 0;
7668 }
7669 #endif
7670
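/* Determine the hardware MAC address.  On SPARC the OpenPROM
 * "local-mac-address" property is tried first; otherwise the address is
 * read from the NIC SRAM mailbox, then from NVRAM, and finally from the
 * MAC_ADDR_0_HIGH/LOW registers as a last resort.
 */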
7671 static int __devinit tg3_get_device_address(struct tg3 *tp)
7672 {
7673         struct net_device *dev = tp->dev;
7674         u32 hi, lo, mac_offset;
7675
7676 #ifdef CONFIG_SPARC64
7677         if (!tg3_get_macaddr_sparc(tp))
7678                 return 0;
7679 #endif
7680
7681         mac_offset = 0x7c;
7682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7683             !(tp->tg3_flags & TG3_FLG2_SUN_5704)) {
7684                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
7685                         mac_offset = 0xcc;
7686                 if (tg3_nvram_lock(tp))
7687                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
7688                 else
7689                         tg3_nvram_unlock(tp);
7690         }
7691
7692         /* First try to get it from MAC address mailbox. */
7693         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
7694         if ((hi >> 16) == 0x484b) {
7695                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7696                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7697
7698                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7699                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7700                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7701                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7702                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7703         }
7704         /* Next, try NVRAM. */
7705         else if (!(tp->tg3_flags & TG3_FLG2_SUN_5704) &&
7706                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7707                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7708                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7709                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7710                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7711                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7712                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7713                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7714         }
7715         /* Finally just fetch it out of the MAC control regs. */
7716         else {
7717                 hi = tr32(MAC_ADDR_0_HIGH);
7718                 lo = tr32(MAC_ADDR_0_LOW);
7719
7720                 dev->dev_addr[5] = lo & 0xff;
7721                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7722                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7723                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7724                 dev->dev_addr[1] = hi & 0xff;
7725                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7726         }
7727
7728         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7729 #ifdef CONFIG_SPARC64
7730                 if (!tg3_get_default_macaddr_sparc(tp))
7731                         return 0;
7732 #endif
7733                 return -EINVAL;
7734         }
7735         return 0;
7736 }
7737
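/* Run a single host <-> NIC DMA transfer of 'size' bytes using a test
 * descriptor placed in NIC SRAM.  'to_device' selects the read DMA engine
 * (host memory -> chip) versus the write DMA engine (chip -> host memory),
 * and the corresponding completion FIFO is polled for the descriptor.
 */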
7738 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7739 {
7740         struct tg3_internal_buffer_desc test_desc;
7741         u32 sram_dma_descs;
7742         int i, ret;
7743
7744         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7745
7746         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7747         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7748         tw32(RDMAC_STATUS, 0);
7749         tw32(WDMAC_STATUS, 0);
7750
7751         tw32(BUFMGR_MODE, 0);
7752         tw32(FTQ_RESET, 0);
7753
7754         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7755         test_desc.addr_lo = buf_dma & 0xffffffff;
7756         test_desc.nic_mbuf = 0x00002100;
7757         test_desc.len = size;
7758
7759         /*
7760          * HP ZX1 systems were seeing test failures for 5701 cards running
7761          * at 33MHz the *second* time the tg3 driver was loaded after an
7762          * initial scan.
7763          *
7764          * Broadcom tells me:
7765          *   ...the DMA engine is connected to the GRC block and a DMA
7766          *   reset may affect the GRC block in some unpredictable way...
7767          *   The behavior of resets to individual blocks has not been tested.
7768          *
7769          * Broadcom noted the GRC reset will also reset all sub-components.
7770          */
7771         if (to_device) {
7772                 test_desc.cqid_sqid = (13 << 8) | 2;
7773
7774                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7775                 udelay(40);
7776         } else {
7777                 test_desc.cqid_sqid = (16 << 8) | 7;
7778
7779                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7780                 udelay(40);
7781         }
7782         test_desc.flags = 0x00000005;
7783
7784         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7785                 u32 val;
7786
7787                 val = *(((u32 *)&test_desc) + i);
7788                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7789                                        sram_dma_descs + (i * sizeof(u32)));
7790                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7791         }
7792         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7793
7794         if (to_device) {
7795                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7796         } else {
7797                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7798         }
7799
7800         ret = -ENODEV;
7801         for (i = 0; i < 40; i++) {
7802                 u32 val;
7803
7804                 if (to_device)
7805                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7806                 else
7807                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7808                 if ((val & 0xffff) == sram_dma_descs) {
7809                         ret = 0;
7810                         break;
7811                 }
7812
7813                 udelay(100);
7814         }
7815
7816         return ret;
7817 }
7818
7819 #define TEST_BUFFER_SIZE        0x400
7820
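/* Pick DMA read/write control settings based on the host cache line size
 * and bus type (PCI, PCI-X, PCI Express), then, on 5700/5701 parts, verify
 * them with a write/read loopback of a test buffer, falling back to a
 * 16-byte write boundary if corruption is seen.
 */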
7821 static int __devinit tg3_test_dma(struct tg3 *tp)
7822 {
7823         dma_addr_t buf_dma;
7824         u32 *buf;
7825         int ret;
7826
7827         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7828         if (!buf) {
7829                 ret = -ENOMEM;
7830                 goto out_nofree;
7831         }
7832
7833         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7834                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
7835
7836 #ifndef CONFIG_X86
7837         {
7838                 u8 byte;
7839                 int cacheline_size;
7840                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7841
7842                 if (byte == 0)
7843                         cacheline_size = 1024;
7844                 else
7845                         cacheline_size = (int) byte * 4;
7846
7847                 switch (cacheline_size) {
7848                 case 16:
7849                 case 32:
7850                 case 64:
7851                 case 128:
7852                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7853                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7854                                 tp->dma_rwctrl |=
7855                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
7856                                 break;
7857                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7858                                 tp->dma_rwctrl &=
7859                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
7860                                 tp->dma_rwctrl |=
7861                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
7862                                 break;
7863                         }
7864                         /* fallthrough */
7865                 case 256:
7866                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
7867                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7868                                 tp->dma_rwctrl |=
7869                                         DMA_RWCTRL_WRITE_BNDRY_256;
7870                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7871                                 tp->dma_rwctrl |=
7872                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
7873         }
7874         }
7875 #endif
7876
7877         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7878                 tp->dma_rwctrl |= 0x001f0000;
7879         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7880                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
7881                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7882                         tp->dma_rwctrl |= 0x003f0000;
7883                 else
7884                         tp->dma_rwctrl |= 0x003f000f;
7885         } else {
7886                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7887                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7888                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7889
7890                         if (ccval == 0x6 || ccval == 0x7)
7891                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7892
7893                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
7894                         tp->dma_rwctrl |= 0x009f0000;
7895                 } else {
7896                         tp->dma_rwctrl |= 0x001b000f;
7897                 }
7898         }
7899
7900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7902                 tp->dma_rwctrl &= 0xfffffff0;
7903
7904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7906                 /* Remove this if it causes problems for some boards. */
7907                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7908
7909                 /* On 5700/5701 chips, we need to set this bit.
7910                  * Otherwise the chip will issue cacheline transactions
7911                  * to streamable DMA memory without all of the byte
7912                  * enables turned on.  This is an error on several
7913                  * RISC PCI controllers, in particular sparc64.
7914                  *
7915                  * On 5703/5704 chips, this bit has been reassigned
7916                  * a different meaning.  In particular, it is used
7917                  * on those chips to enable a PCI-X workaround.
7918                  */
7919                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7920         }
7921
7922         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7923
7924 #if 0
7925         /* Unneeded, already done by tg3_get_invariants.  */
7926         tg3_switch_clocks(tp);
7927 #endif
7928
7929         ret = 0;
7930         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7931             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7932                 goto out;
7933
7934         while (1) {
7935                 u32 *p = buf, i;
7936
7937                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7938                         p[i] = i;
7939
7940                 /* Send the buffer to the chip. */
7941                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7942                 if (ret) {
7943                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
7944                         break;
7945                 }
7946
7947 #if 0
7948                 /* validate data reached card RAM correctly. */
7949                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7950                         u32 val;
7951                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7952                         if (le32_to_cpu(val) != p[i]) {
7953                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
7954                                 /* ret = -ENODEV here? */
7955                         }
7956                         p[i] = 0;
7957                 }
7958 #endif
7959                 /* Now read it back. */
7960                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7961                 if (ret) {
7962                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
7963
7964                         break;
7965                 }
7966
7967                 /* Verify it. */
7968                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7969                         if (p[i] == i)
7970                                 continue;
7971
7972                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7973                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7974                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7975                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7976                                 break;
7977                         } else {
7978                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
7979                                 ret = -ENODEV;
7980                                 goto out;
7981                         }
7982                 }
7983
7984                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
7985                         /* Success. */
7986                         ret = 0;
7987                         break;
7988                 }
7989         }
7990
7991 out:
7992         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
7993 out_nofree:
7994         return ret;
7995 }
7996
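/* Establish the default link configuration: advertise all 10/100/1000
 * modes with autonegotiation enabled and mark the link as down until the
 * first poll.
 */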
7997 static void __devinit tg3_init_link_config(struct tg3 *tp)
7998 {
7999         tp->link_config.advertising =
8000                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8001                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8002                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8003                  ADVERTISED_Autoneg | ADVERTISED_MII);
8004         tp->link_config.speed = SPEED_INVALID;
8005         tp->link_config.duplex = DUPLEX_INVALID;
8006         tp->link_config.autoneg = AUTONEG_ENABLE;
8007         netif_carrier_off(tp->dev);
8008         tp->link_config.active_speed = SPEED_INVALID;
8009         tp->link_config.active_duplex = DUPLEX_INVALID;
8010         tp->link_config.phy_is_low_power = 0;
8011         tp->link_config.orig_speed = SPEED_INVALID;
8012         tp->link_config.orig_duplex = DUPLEX_INVALID;
8013         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8014 }
8015
8016 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8017 {
8018         tp->bufmgr_config.mbuf_read_dma_low_water =
8019                 DEFAULT_MB_RDMA_LOW_WATER;
8020         tp->bufmgr_config.mbuf_mac_rx_low_water =
8021                 DEFAULT_MB_MACRX_LOW_WATER;
8022         tp->bufmgr_config.mbuf_high_water =
8023                 DEFAULT_MB_HIGH_WATER;
8024
8025         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8026                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8027         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8028                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8029         tp->bufmgr_config.mbuf_high_water_jumbo =
8030                 DEFAULT_MB_HIGH_WATER_JUMBO;
8031
8032         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8033         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8034 }
8035
8036 static char * __devinit tg3_phy_string(struct tg3 *tp)
8037 {
8038         switch (tp->phy_id & PHY_ID_MASK) {
8039         case PHY_ID_BCM5400:    return "5400";
8040         case PHY_ID_BCM5401:    return "5401";
8041         case PHY_ID_BCM5411:    return "5411";
8042         case PHY_ID_BCM5701:    return "5701";
8043         case PHY_ID_BCM5703:    return "5703";
8044         case PHY_ID_BCM5704:    return "5704";
8045         case PHY_ID_BCM5705:    return "5705";
8046         case PHY_ID_BCM5750:    return "5750";
8047         case PHY_ID_BCM8002:    return "8002";
8048         case PHY_ID_SERDES:     return "serdes";
8049         default:                return "unknown";
8050         }
8051 }
8052
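/* The 5704 is a dual-MAC part; both MACs appear as separate PCI functions
 * in the same slot.  Scan the other functions of our devfn group to locate
 * the peer device.
 */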
8053 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8054 {
8055         struct pci_dev *peer;
8056         unsigned int func, devnr = tp->pdev->devfn & ~7;
8057
8058         for (func = 0; func < 8; func++) {
8059                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8060                 if (peer && peer != tp->pdev)
8061                         break;
8062                 pci_dev_put(peer);
8063         }
8064         if (!peer || peer == tp->pdev)
8065                 BUG();
8066
8067         /*
8068          * We don't need to keep the refcount elevated; there's no way
8069          * to remove one half of this device without removing the other.
8070          */
8071         pci_dev_put(peer);
8072
8073         return peer;
8074 }
8075
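/* PCI probe entry point: enable and map the device, allocate the
 * net_device, read the chip invariants and MAC address, tune the DMA
 * engine, and finally register the interface with the networking core.
 */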
8076 static int __devinit tg3_init_one(struct pci_dev *pdev,
8077                                   const struct pci_device_id *ent)
8078 {
8079         static int tg3_version_printed = 0;
8080         unsigned long tg3reg_base, tg3reg_len;
8081         struct net_device *dev;
8082         struct tg3 *tp;
8083         int i, err, pci_using_dac, pm_cap;
8084
8085         if (tg3_version_printed++ == 0)
8086                 printk(KERN_INFO "%s", version);
8087
8088         err = pci_enable_device(pdev);
8089         if (err) {
8090                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8091                        "aborting.\n");
8092                 return err;
8093         }
8094
8095         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8096                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8097                        "base address, aborting.\n");
8098                 err = -ENODEV;
8099                 goto err_out_disable_pdev;
8100         }
8101
8102         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8103         if (err) {
8104                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8105                        "aborting.\n");
8106                 goto err_out_disable_pdev;
8107         }
8108
8109         pci_set_master(pdev);
8110
8111         /* Find power-management capability. */
8112         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8113         if (pm_cap == 0) {
8114                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8115                        "aborting.\n");
8116                 err = -EIO;
8117                 goto err_out_free_res;
8118         }
8119
8120         /* Configure DMA attributes. */
8121         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8122         if (!err) {
8123                 pci_using_dac = 1;
8124                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8125                 if (err < 0) {
8126                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8127                                "for consistent allocations\n");
8128                         goto err_out_free_res;
8129                 }
8130         } else {
8131                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8132                 if (err) {
8133                         printk(KERN_ERR PFX "No usable DMA configuration, "
8134                                "aborting.\n");
8135                         goto err_out_free_res;
8136                 }
8137                 pci_using_dac = 0;
8138         }
8139
8140         tg3reg_base = pci_resource_start(pdev, 0);
8141         tg3reg_len = pci_resource_len(pdev, 0);
8142
8143         dev = alloc_etherdev(sizeof(*tp));
8144         if (!dev) {
8145                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8146                 err = -ENOMEM;
8147                 goto err_out_free_res;
8148         }
8149
8150         SET_MODULE_OWNER(dev);
8151         SET_NETDEV_DEV(dev, &pdev->dev);
8152
8153         if (pci_using_dac)
8154                 dev->features |= NETIF_F_HIGHDMA;
8155 #if TG3_VLAN_TAG_USED
8156         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8157         dev->vlan_rx_register = tg3_vlan_rx_register;
8158         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8159 #endif
8160
8161         tp = netdev_priv(dev);
8162         tp->pdev = pdev;
8163         tp->dev = dev;
8164         tp->pm_cap = pm_cap;
8165         tp->mac_mode = TG3_DEF_MAC_MODE;
8166         tp->rx_mode = TG3_DEF_RX_MODE;
8167         tp->tx_mode = TG3_DEF_TX_MODE;
8168         tp->mi_mode = MAC_MI_MODE_BASE;
8169         if (tg3_debug > 0)
8170                 tp->msg_enable = tg3_debug;
8171         else
8172                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8173
8174         /* The word/byte swap controls here control register access byte
8175          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8176          * setting below.
8177          */
8178         tp->misc_host_ctrl =
8179                 MISC_HOST_CTRL_MASK_PCI_INT |
8180                 MISC_HOST_CTRL_WORD_SWAP |
8181                 MISC_HOST_CTRL_INDIR_ACCESS |
8182                 MISC_HOST_CTRL_PCISTATE_RW;
8183
8184         /* The NONFRM (non-frame) byte/word swap controls take effect
8185          * on descriptor entries, i.e. anything that isn't packet data.
8186          *
8187          * The StrongARM chips on the board (one for tx, one for rx)
8188          * are running in big-endian mode.
8189          */
8190         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8191                         GRC_MODE_WSWAP_NONFRM_DATA);
8192 #ifdef __BIG_ENDIAN
8193         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8194 #endif
8195         spin_lock_init(&tp->lock);
8196         spin_lock_init(&tp->tx_lock);
8197         spin_lock_init(&tp->indirect_lock);
8198         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8199
8200         tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
8201         if (tp->regs == 0UL) {
8202                 printk(KERN_ERR PFX "Cannot map device registers, "
8203                        "aborting.\n");
8204                 err = -ENOMEM;
8205                 goto err_out_free_dev;
8206         }
8207
8208         tg3_init_link_config(tp);
8209
8210         tg3_init_bufmgr_config(tp);
8211
8212         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8213         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8214         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8215
8216         dev->open = tg3_open;
8217         dev->stop = tg3_close;
8218         dev->get_stats = tg3_get_stats;
8219         dev->set_multicast_list = tg3_set_rx_mode;
8220         dev->set_mac_address = tg3_set_mac_addr;
8221         dev->do_ioctl = tg3_ioctl;
8222         dev->tx_timeout = tg3_tx_timeout;
8223         dev->poll = tg3_poll;
8224         dev->ethtool_ops = &tg3_ethtool_ops;
8225         dev->weight = 64;
8226         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8227         dev->change_mtu = tg3_change_mtu;
8228         dev->irq = pdev->irq;
8229 #ifdef CONFIG_NET_POLL_CONTROLLER
8230         dev->poll_controller = tg3_poll_controller;
8231 #endif
8232
8233         err = tg3_get_invariants(tp);
8234         if (err) {
8235                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8236                        "aborting.\n");
8237                 goto err_out_iounmap;
8238         }
8239
8240         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8241             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8242                 tp->bufmgr_config.mbuf_read_dma_low_water =
8243                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8244                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8245                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8246                 tp->bufmgr_config.mbuf_high_water =
8247                         DEFAULT_MB_HIGH_WATER_5705;
8248         }
8249
8250 #if TG3_TSO_SUPPORT != 0
8251         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8252             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8253             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8254             ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
8255              GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
8256                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8257         } else {
8258                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8259         }
8260
8261         /* TSO is off by default; the user can enable it using ethtool.  */
8262 #if 0
8263         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8264                 dev->features |= NETIF_F_TSO;
8265 #endif
8266
8267 #endif
8268
8269         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8270             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8271             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8272                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8273                 tp->rx_pending = 63;
8274         }
8275
8276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8277                 tp->pdev_peer = tg3_find_5704_peer(tp);
8278
8279         err = tg3_get_device_address(tp);
8280         if (err) {
8281                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8282                        "aborting.\n");
8283                 goto err_out_iounmap;
8284         }
8285
8286         /*
8287          * Reset the chip in case the UNDI or EFI driver did not shut it
8288          * down.  The DMA self test will enable WDMAC and we'll see
8289          * (spurious) pending DMA on the PCI bus at that point.
8290          */
8291         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8292             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8293                 pci_save_state(tp->pdev, tp->pci_cfg_state);
8294                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8295                 tg3_halt(tp);
8296         }
8297
8298         err = tg3_test_dma(tp);
8299         if (err) {
8300                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8301                 goto err_out_iounmap;
8302         }
8303
8304         /* Tigon3 can only do IPv4 checksum offload... and some chips
8305          * have buggy checksumming.
8306          */
8307         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8308                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8309                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8310         } else
8311                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8312
8313         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8314                 dev->features &= ~NETIF_F_HIGHDMA;
8315
8316         err = register_netdev(dev);
8317         if (err) {
8318                 printk(KERN_ERR PFX "Cannot register net device, "
8319                        "aborting.\n");
8320                 goto err_out_iounmap;
8321         }
8322
8323         pci_set_drvdata(pdev, dev);
8324
8325         /* Now that we have fully set up the chip, save away a snapshot
8326          * of the PCI config space.  We need to restore this after
8327          * GRC_MISC_CFG core clock resets and some resume events.
8328          */
8329         pci_save_state(tp->pdev, tp->pci_cfg_state);
8330
8331         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8332                dev->name,
8333                tp->board_part_number,
8334                tp->pci_chip_rev_id,
8335                tg3_phy_string(tp),
8336                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8337                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8338                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8339                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8340                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8341                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8342
8343         for (i = 0; i < 6; i++)
8344                 printk("%2.2x%c", dev->dev_addr[i],
8345                        i == 5 ? '\n' : ':');
8346
8347         printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
8348                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8349                "TSOcap[%d] \n",
8350                dev->name,
8351                (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
8352                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8353                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8354                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8355                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8356                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8357                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8358                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8359
8360         return 0;
8361
8362 err_out_iounmap:
8363         iounmap((void *) tp->regs);
8364
8365 err_out_free_dev:
8366         free_netdev(dev);
8367
8368 err_out_free_res:
8369         pci_release_regions(pdev);
8370
8371 err_out_disable_pdev:
8372         pci_disable_device(pdev);
8373         pci_set_drvdata(pdev, NULL);
8374         return err;
8375 }
8376
8377 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8378 {
8379         struct net_device *dev = pci_get_drvdata(pdev);
8380
8381         if (dev) {
8382                 struct tg3 *tp = netdev_priv(dev);
8383
8384                 unregister_netdev(dev);
8385                 iounmap((void *)tp->regs);
8386                 free_netdev(dev);
8387                 pci_release_regions(pdev);
8388                 pci_disable_device(pdev);
8389                 pci_set_drvdata(pdev, NULL);
8390         }
8391 }
8392
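/* Power management: stop the interface, disable interrupts, halt the chip
 * and drop it to the requested PCI power state.  If the power transition
 * fails, the hardware is re-initialized and the interface is brought back
 * up.
 */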
8393 static int tg3_suspend(struct pci_dev *pdev, u32 state)
8394 {
8395         struct net_device *dev = pci_get_drvdata(pdev);
8396         struct tg3 *tp = netdev_priv(dev);
8397         int err;
8398
8399         if (!netif_running(dev))
8400                 return 0;
8401
8402         tg3_netif_stop(tp);
8403
8404         del_timer_sync(&tp->timer);
8405
8406         spin_lock_irq(&tp->lock);
8407         spin_lock(&tp->tx_lock);
8408         tg3_disable_ints(tp);
8409         spin_unlock(&tp->tx_lock);
8410         spin_unlock_irq(&tp->lock);
8411
8412         netif_device_detach(dev);
8413
8414         spin_lock_irq(&tp->lock);
8415         spin_lock(&tp->tx_lock);
8416         tg3_halt(tp);
8417         spin_unlock(&tp->tx_lock);
8418         spin_unlock_irq(&tp->lock);
8419
8420         err = tg3_set_power_state(tp, state);
8421         if (err) {
8422                 spin_lock_irq(&tp->lock);
8423                 spin_lock(&tp->tx_lock);
8424
8425                 tg3_init_hw(tp);
8426
8427                 tp->timer.expires = jiffies + tp->timer_offset;
8428                 add_timer(&tp->timer);
8429
8430                 spin_unlock(&tp->tx_lock);
8431                 spin_unlock_irq(&tp->lock);
8432
8433                 netif_device_attach(dev);
8434                 tg3_netif_start(tp);
8435         }
8436
8437         return err;
8438 }
8439
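/* Resume: restore full power, re-initialize the hardware, restart the
 * timer and interrupts, and re-attach the interface.
 */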
8440 static int tg3_resume(struct pci_dev *pdev)
8441 {
8442         struct net_device *dev = pci_get_drvdata(pdev);
8443         struct tg3 *tp = netdev_priv(dev);
8444         int err;
8445
8446         if (!netif_running(dev))
8447                 return 0;
8448
8449         err = tg3_set_power_state(tp, 0);
8450         if (err)
8451                 return err;
8452
8453         netif_device_attach(dev);
8454
8455         spin_lock_irq(&tp->lock);
8456         spin_lock(&tp->tx_lock);
8457
8458         tg3_init_hw(tp);
8459
8460         tp->timer.expires = jiffies + tp->timer_offset;
8461         add_timer(&tp->timer);
8462
8463         tg3_enable_ints(tp);
8464
8465         spin_unlock(&tp->tx_lock);
8466         spin_unlock_irq(&tp->lock);
8467
8468         tg3_netif_start(tp);
8469
8470         return 0;
8471 }
8472
8473 static struct pci_driver tg3_driver = {
8474         .name           = DRV_MODULE_NAME,
8475         .id_table       = tg3_pci_tbl,
8476         .probe          = tg3_init_one,
8477         .remove         = __devexit_p(tg3_remove_one),
8478         .suspend        = tg3_suspend,
8479         .resume         = tg3_resume
8480 };
8481
8482 static int __init tg3_init(void)
8483 {
8484         return pci_module_init(&tg3_driver);
8485 }
8486
8487 static void __exit tg3_cleanup(void)
8488 {
8489         pci_unregister_driver(&tg3_driver);
8490 }
8491
8492 module_init(tg3_init);
8493 module_exit(tg3_cleanup);