/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 */

#include <linux/config.h>

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.3"
#define DRV_MODULE_RELDATE      "April 27, 2004"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ? \
         512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define TX_RING_GAP(TP) \
        (TG3_TX_RING_SIZE - (TP)->tx_pending)
#define TX_BUFFS_AVAIL(TP)                                              \
        (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
          (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
          (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

#define TG3_NUM_STATS           25      /* number of ETHTOOL_GSTATS u64's */

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(tg3_debug, "i");
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

struct {
        char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" }
};

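/* Register writes.  Chips flagged with the PCI-X target workaround are
 * written indirectly through the PCI config space window
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA) under indirect_lock; all
 * others use a direct MMIO write, read back to flush on chips with the
 * 5701 register write bug.
 */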
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
                unsigned long flags;

                spin_lock_irqsave(&tp->indirect_lock, flags);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                spin_unlock_irqrestore(&tp->indirect_lock, flags);
        } else {
                writel(val, tp->regs + off);
                if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
                        readl(tp->regs + off);
        }
}

static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
                unsigned long flags;

                spin_lock_irqsave(&tp->indirect_lock, flags);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                spin_unlock_irqrestore(&tp->indirect_lock, flags);
        } else {
                unsigned long dest = tp->regs + off;
                writel(val, dest);
                readl(dest);    /* always flush PCI write */
        }
}

static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

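/* Shorthand register accessors.  tw32() goes through the indirect-write
 * helper above, tw32_f() additionally reads the register back in the
 * direct MMIO case to flush the posted write, and the mailbox variants
 * apply the reorder/double-write workarounds implemented in
 * _tw32_rx_mbox()/_tw32_tx_mbox().
 */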
#define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
#define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)               readl(tp->regs + (reg))
#define tr16(reg)               readw(tp->regs + (reg))
#define tr8(reg)                readb(tp->regs + (reg))

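/* NIC on-board memory is accessed through the PCI memory window: point
 * TG3PCI_MEM_WIN_BASE_ADDR at the target offset, move the word through
 * TG3PCI_MEM_WIN_DATA, then restore the base address to zero, all under
 * indirect_lock.
 */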
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

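/* Interrupt masking: set/clear MISC_HOST_CTRL_MASK_PCI_INT and write the
 * interrupt mailbox (1 = masked, 0 = unmasked), reading it back to flush.
 * tg3_cond_int() re-asserts an interrupt through GRC_LOCAL_CTRL when the
 * status block already has an update pending.
 */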
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (tp->hw_status->status & SD_STATUS_UPDATED)
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

        tg3_cond_int(tp);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tg3_cond_int(tp);
}

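/* Switch the core clock over in stages: when the 44MHz core clock was in
 * use (non-5705 parts), ALTCLK is enabled first, then the 44MHz bit is
 * dropped, and finally the stripped-down clock_ctrl value is written.
 */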
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
            (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl |
                     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
                udelay(40);
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl | (CLOCK_CTRL_ALTCLK));
                udelay(40);
        }
        tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
        udelay(40);
}

#define PHY_BUSY_LOOPS  5000

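/* MII management access: auto-polling is temporarily disabled, a
 * read/write frame is issued through MAC_MI_COM, and the busy bit is
 * polled for up to PHY_BUSY_LOOPS iterations before giving up with
 * -EBUSY.
 */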
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        int loops, ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(40);
        }

        *val = 0xffffffff;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops-- > 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
        }

        ret = -EBUSY;
        if (loops > 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(40);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        int loops, ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(40);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops-- > 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
        }

        ret = -EBUSY;
        if (loops > 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(40);
        }

        return ret;
}

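/* Enable the PHY's Ethernet@WireSpeed feature through the shadowed
 * auxiliary control register, unless the board disables it with
 * TG3_FLG2_NO_ETH_WIRE_SPEED.
 */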
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
        tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
        tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                tg3_readphy(tp, 0x16, &tmp32);
                if ((tmp32 & 0x1000) == 0)
                        break;
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

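/* Write a known test pattern to each of the four PHY DSP channels and
 * read it back.  A macro-done timeout requests a fresh PHY reset via
 * *resetp; a pattern mismatch is reported through DSP register 0x000b
 * and the function returns -EBUSY so the caller retries.
 */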
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
                        if (tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

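/* Extended PHY reset used on 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, and verify the DSP channels with the test
 * pattern above, retrying up to ten times, before restoring the
 * original MII_TG3_CTRL and extended control settings.
 */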
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
        reg32 &= ~0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

        return err;
}

/* Reset the tigon3 PHY and apply the chip-specific PHY workarounds.
 * Callers decide when a reset is actually needed (e.g. on link loss
 * or when a forced reconfiguration is requested).
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        }
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
                tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }
        tg3_phy_set_wirespeed(tp);
        return 0;
}

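/* Drive the GPIO bits in GRC_LOCAL_CTRL that control auxiliary power.
 * On 5704 the two ports share this circuitry, so the peer device's WOL
 * and init state are consulted before the GPIOs are touched.
 */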
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                tp_peer = pci_get_drvdata(tp->pdev_peer);
                if (!tp_peer)
                        BUG();
        }


        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                } else {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT1 |
                              GRC_LCLCTRL_GPIO_OUTPUT2));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1 |
                              GRC_LCLCTRL_GPIO_OUTPUT2));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        }
}

static int tg3_setup_phy(struct tg3 *, int);

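/* Move the chip to the requested PCI power state (0 = D0 ... 3 = D3).
 * For the low-power states the copper link is dropped to 10Mb autoneg,
 * the MAC is set up for Wake-on-LAN if enabled, core clocks are slowed
 * and auxiliary power is configured before the PM control register is
 * finally written.
 */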
static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(100);

                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        if (tp->phy_id != PHY_ID_SERDES) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (tp->phy_id != PHY_ID_SERDES) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }


                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                                         tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        tg3_frob_aux_power(tp);

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       tp->dev->name,
                       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
                       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

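/* Resolve 802.3x flow control from our advertised pause bits and the
 * link partner's, following the standard pause resolution rules, and
 * program RX_MODE/TX_MODE accordingly.
 */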
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
        u32 new_tg3_flags = 0;

        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & LPA_PAUSE_CAP)
                                new_tg3_flags |=
                                        (TG3_FLAG_RX_PAUSE |
                                         TG3_FLAG_TX_PAUSE);
                        else if (remote_adv & LPA_PAUSE_ASYM)
                                new_tg3_flags |=
                                        (TG3_FLAG_RX_PAUSE);
                } else {
                        if (remote_adv & LPA_PAUSE_CAP)
                                new_tg3_flags |=
                                        (TG3_FLAG_RX_PAUSE |
                                         TG3_FLAG_TX_PAUSE);
                }
        } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & LPA_PAUSE_CAP) &&
                    (remote_adv & LPA_PAUSE_ASYM))
                        new_tg3_flags |= TG3_FLAG_TX_PAUSE;
        }

        tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
        tp->tg3_flags |= new_tg3_flags;

        if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
}

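/* Decode the speed/duplex field of the PHY auxiliary status register. */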
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
                *speed = SPEED_10;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_10FULL:
                *speed = SPEED_10;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_100HALF:
                *speed = SPEED_100;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_100FULL:
                *speed = SPEED_100;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_1000HALF:
                *speed = SPEED_1000;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_1000FULL:
                *speed = SPEED_1000;
                *duplex = DUPLEX_FULL;
                break;

        default:
                *speed = SPEED_INVALID;
                *duplex = DUPLEX_INVALID;
                break;
        };
}

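/* Program the copper PHY advertisement registers and (re)start
 * autonegotiation, or force the requested speed/duplex when autoneg is
 * disabled.  In low-power mode only 10Mb (plus 100Mb when WOL at 100Mb
 * is configured) is advertised.
 */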
static int tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->link_config.phy_is_low_power) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
                tg3_writephy(tp, MII_TG3_CTRL, 0);

                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                tp->link_config.advertising =
                        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg | ADVERTISED_MII);

                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
                        new_adv |= ADVERTISE_10HALF;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
                        new_adv |= ADVERTISE_10FULL;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
                        new_adv |= ADVERTISE_100HALF;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
                        new_adv |= ADVERTISE_100FULL;
                tg3_writephy(tp, MII_ADVERTISE, new_adv);

                if (tp->link_config.advertising &
                    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
                        new_adv = 0;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);
                }
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = MII_TG3_CTRL_ADV_1000_FULL;
                        else
                                new_adv = MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);

                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        if (tp->link_config.speed == SPEED_100) {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_100FULL;
                                else
                                        new_adv |= ADVERTISE_100HALF;
                        } else {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_10FULL;
                                else
                                        new_adv |= ADVERTISE_10HALF;
                        }
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                }
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= TG3_BMCR_SPEED1000;
                        break;
                };

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                tg3_readphy(tp, MII_BMCR, &orig_bmcr);
                if (bmcr != orig_bmcr) {
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                tg3_readphy(tp, MII_BMSR, &tmp);
                                tg3_readphy(tp, MII_BMSR, &tmp);
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }

        return 0;
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
        int err;

        /* Turn off tap power management. */
        /* Set Extended packet length bit */
        err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

        udelay(40);

        return err;
}

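/* Return 1 if the PHY is currently advertising every 10/100 mode (and,
 * unless the board is 10/100-only, both gigabit modes); used to decide
 * whether autoneg must be restarted after leaving low-power mode.
 */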
static int tg3_copper_is_advertising_all(struct tg3 *tp)
{
        u32 adv_reg, all_mask;

        tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
        all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                    ADVERTISE_100HALF | ADVERTISE_100FULL);
        if ((adv_reg & all_mask) != all_mask)
                return 0;
        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                u32 tg3_ctrl;

                tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
                all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
                            MII_TG3_CTRL_ADV_1000_FULL);
                if ((tg3_ctrl & all_mask) != all_mask)
                        return 0;
        }
        return 1;
}

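/* Bring up (or re-check) the copper link: reset the PHY when needed,
 * clear pending PHY interrupts, poll BMSR for link (it is latched, so
 * it is read twice), derive speed/duplex from the aux status register,
 * and resolve flow control before acting on the final link state.
 */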
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, dummy;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        tp->mi_mode = MAC_MI_MODE_BASE;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(40);

        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                tg3_readphy(tp, MII_BMSR, &bmsr);

                if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                tg3_readphy(tp, MII_BMSR, &bmsr);
                                if (bmsr & BMSR_LSTATUS) {
                                        udelay(40);
                                        break;
                                }
                        }

                        if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, 0x1c, 0x8c68);
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (tp->led_mode == led_mode_three_link)
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        else
                tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (bmsr & BMSR_LSTATUS)
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                        if (aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if (bmcr & BMCR_ANENABLE) {
                                current_link_up = 1;

                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
                                if (!tg3_copper_is_advertising_all(tp))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
                        }
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex) {
                                current_link_up = 1;
                        } else {
                                current_link_up = 0;
                        }
                }

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;
        }

        if (current_link_up == 1 &&
            (tp->link_config.active_duplex == DUPLEX_FULL) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 local_adv, remote_adv;

                tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                tg3_readphy(tp, MII_LPA, &remote_adv);
                remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                /* If we are not advertising full pause capability,
                 * something is wrong.  Bring the link down and reconfigure.
                 */
                if (local_adv != ADVERTISE_PAUSE_CAP) {
                        current_link_up = 0;
                } else {
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                }
        }

        if (current_link_up == 0) {
                u32 tmp;

                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &tmp);
                tg3_readphy(tp, MII_BMSR, &tmp);
                if (tmp & BMSR_LSTATUS)
1481                         current_link_up = 1;
1482         }
1483
1484         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1485         if (current_link_up == 1) {
1486                 if (tp->link_config.active_speed == SPEED_100 ||
1487                     tp->link_config.active_speed == SPEED_10)
1488                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1489                 else
1490                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1491         } else
1492                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1493
1494         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1495         if (tp->link_config.active_duplex == DUPLEX_HALF)
1496                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1497
1498         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1500                 if ((tp->led_mode == led_mode_link10) ||
1501                     (current_link_up == 1 &&
1502                      tp->link_config.active_speed == SPEED_10))
1503                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1504         } else {
1505                 if (current_link_up == 1)
1506                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1507                 tw32(MAC_LED_CTRL, LED_CTRL_PHY_MODE_1);
1508         }
1509
1510         /* ??? Without this setting Netgear GA302T PHY does not
1511          * ??? send/receive packets...
1512          */
1513         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1514             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1515                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1516                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1517                 udelay(40);
1518         }
1519
1520         tw32_f(MAC_MODE, tp->mac_mode);
1521         udelay(40);
1522
1523         if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
1524                 /* Polled via timer. */
1525                 tw32_f(MAC_EVENT, 0);
1526         } else {
1527                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1528         }
1529         udelay(40);
1530
1531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1532             current_link_up == 1 &&
1533             tp->link_config.active_speed == SPEED_1000 &&
1534             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1535              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1536                 udelay(120);
1537                 tw32_f(MAC_STATUS,
1538                      (MAC_STATUS_SYNC_CHANGED |
1539                       MAC_STATUS_CFG_CHANGED));
1540                 udelay(40);
1541                 tg3_write_mem(tp,
1542                               NIC_SRAM_FIRMWARE_MBOX,
1543                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1544         }
1545
1546         if (current_link_up != netif_carrier_ok(tp->dev)) {
1547                 if (current_link_up)
1548                         netif_carrier_on(tp->dev);
1549                 else
1550                         netif_carrier_off(tp->dev);
1551                 tg3_link_report(tp);
1552         }
1553
1554         return 0;
1555 }
1556
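/* Software auto-negotiation state for the fiber (SerDes) port.  The state
 * machine below roughly mirrors the 1000BASE-X (IEEE 802.3 clause 37)
 * auto-negotiation arbitration diagram: ability detect, ack detect,
 * complete ack, idle detect, link OK.  tg3_setup_fiber_phy() ticks it in
 * a polling loop rather than relying on PHY hardware to do the work.
 */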
1557 struct tg3_fiber_aneginfo {
1558         int state;
1559 #define ANEG_STATE_UNKNOWN              0
1560 #define ANEG_STATE_AN_ENABLE            1
1561 #define ANEG_STATE_RESTART_INIT         2
1562 #define ANEG_STATE_RESTART              3
1563 #define ANEG_STATE_DISABLE_LINK_OK      4
1564 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1565 #define ANEG_STATE_ABILITY_DETECT       6
1566 #define ANEG_STATE_ACK_DETECT_INIT      7
1567 #define ANEG_STATE_ACK_DETECT           8
1568 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1569 #define ANEG_STATE_COMPLETE_ACK         10
1570 #define ANEG_STATE_IDLE_DETECT_INIT     11
1571 #define ANEG_STATE_IDLE_DETECT          12
1572 #define ANEG_STATE_LINK_OK              13
1573 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1574 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1575
1576         u32 flags;
1577 #define MR_AN_ENABLE            0x00000001
1578 #define MR_RESTART_AN           0x00000002
1579 #define MR_AN_COMPLETE          0x00000004
1580 #define MR_PAGE_RX              0x00000008
1581 #define MR_NP_LOADED            0x00000010
1582 #define MR_TOGGLE_TX            0x00000020
1583 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1584 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1585 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1586 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1587 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1588 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1589 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1590 #define MR_TOGGLE_RX            0x00002000
1591 #define MR_NP_RX                0x00004000
1592
1593 #define MR_LINK_OK              0x80000000
1594
1595         unsigned long link_time, cur_time;
1596
1597         u32 ability_match_cfg;
1598         int ability_match_count;
1599
1600         char ability_match, idle_match, ack_match;
1601
1602         u32 txconfig, rxconfig;
1603 #define ANEG_CFG_NP             0x00000080
1604 #define ANEG_CFG_ACK            0x00000040
1605 #define ANEG_CFG_RF2            0x00000020
1606 #define ANEG_CFG_RF1            0x00000010
1607 #define ANEG_CFG_PS2            0x00000001
1608 #define ANEG_CFG_PS1            0x00008000
1609 #define ANEG_CFG_HD             0x00004000
1610 #define ANEG_CFG_FD             0x00002000
1611 #define ANEG_CFG_INVAL          0x00001f06
1612
1613 };
1614 #define ANEG_OK         0
1615 #define ANEG_DONE       1
1616 #define ANEG_TIMER_ENAB 2
1617 #define ANEG_FAILED     -1
1618
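/* ap->cur_time is incremented once per call to the state machine, and the
 * caller in tg3_setup_fiber_phy() ticks it with roughly a 1us delay per
 * iteration, so this settle time corresponds to about 10ms of link settling.
 */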
1619 #define ANEG_STATE_SETTLE_TIME  10000
1620
1621 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1622                                    struct tg3_fiber_aneginfo *ap)
1623 {
1624         unsigned long delta;
1625         u32 rx_cfg_reg;
1626         int ret;
1627
1628         if (ap->state == ANEG_STATE_UNKNOWN) {
1629                 ap->rxconfig = 0;
1630                 ap->link_time = 0;
1631                 ap->cur_time = 0;
1632                 ap->ability_match_cfg = 0;
1633                 ap->ability_match_count = 0;
1634                 ap->ability_match = 0;
1635                 ap->idle_match = 0;
1636                 ap->ack_match = 0;
1637         }
1638         ap->cur_time++;
1639
1640         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1641                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1642
1643                 if (rx_cfg_reg != ap->ability_match_cfg) {
1644                         ap->ability_match_cfg = rx_cfg_reg;
1645                         ap->ability_match = 0;
1646                         ap->ability_match_count = 0;
1647                 } else {
1648                         if (++ap->ability_match_count > 1) {
1649                                 ap->ability_match = 1;
1650                                 ap->ability_match_cfg = rx_cfg_reg;
1651                         }
1652                 }
1653                 if (rx_cfg_reg & ANEG_CFG_ACK)
1654                         ap->ack_match = 1;
1655                 else
1656                         ap->ack_match = 0;
1657
1658                 ap->idle_match = 0;
1659         } else {
1660                 ap->idle_match = 1;
1661                 ap->ability_match_cfg = 0;
1662                 ap->ability_match_count = 0;
1663                 ap->ability_match = 0;
1664                 ap->ack_match = 0;
1665
1666                 rx_cfg_reg = 0;
1667         }
1668
1669         ap->rxconfig = rx_cfg_reg;
1670         ret = ANEG_OK;
1671
1672         switch(ap->state) {
1673         case ANEG_STATE_UNKNOWN:
1674                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1675                         ap->state = ANEG_STATE_AN_ENABLE;
1676
1677                 /* fallthru */
1678         case ANEG_STATE_AN_ENABLE:
1679                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1680                 if (ap->flags & MR_AN_ENABLE) {
1681                         ap->link_time = 0;
1682                         ap->cur_time = 0;
1683                         ap->ability_match_cfg = 0;
1684                         ap->ability_match_count = 0;
1685                         ap->ability_match = 0;
1686                         ap->idle_match = 0;
1687                         ap->ack_match = 0;
1688
1689                         ap->state = ANEG_STATE_RESTART_INIT;
1690                 } else {
1691                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1692                 }
1693                 break;
1694
1695         case ANEG_STATE_RESTART_INIT:
1696                 ap->link_time = ap->cur_time;
1697                 ap->flags &= ~(MR_NP_LOADED);
1698                 ap->txconfig = 0;
1699                 tw32(MAC_TX_AUTO_NEG, 0);
1700                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1701                 tw32_f(MAC_MODE, tp->mac_mode);
1702                 udelay(40);
1703
1704                 ret = ANEG_TIMER_ENAB;
1705                 ap->state = ANEG_STATE_RESTART;
1706
1707                 /* fallthru */
1708         case ANEG_STATE_RESTART:
1709                 delta = ap->cur_time - ap->link_time;
1710                 if (delta > ANEG_STATE_SETTLE_TIME) {
1711                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1712                 } else {
1713                         ret = ANEG_TIMER_ENAB;
1714                 }
1715                 break;
1716
1717         case ANEG_STATE_DISABLE_LINK_OK:
1718                 ret = ANEG_DONE;
1719                 break;
1720
1721         case ANEG_STATE_ABILITY_DETECT_INIT:
1722                 ap->flags &= ~(MR_TOGGLE_TX);
1723                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1724                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1725                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1726                 tw32_f(MAC_MODE, tp->mac_mode);
1727                 udelay(40);
1728
1729                 ap->state = ANEG_STATE_ABILITY_DETECT;
1730                 break;
1731
1732         case ANEG_STATE_ABILITY_DETECT:
1733                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1734                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1735                 }
1736                 break;
1737
1738         case ANEG_STATE_ACK_DETECT_INIT:
1739                 ap->txconfig |= ANEG_CFG_ACK;
1740                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1741                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1742                 tw32_f(MAC_MODE, tp->mac_mode);
1743                 udelay(40);
1744
1745                 ap->state = ANEG_STATE_ACK_DETECT;
1746
1747                 /* fallthru */
1748         case ANEG_STATE_ACK_DETECT:
1749                 if (ap->ack_match != 0) {
1750                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1751                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1752                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1753                         } else {
1754                                 ap->state = ANEG_STATE_AN_ENABLE;
1755                         }
1756                 } else if (ap->ability_match != 0 &&
1757                            ap->rxconfig == 0) {
1758                         ap->state = ANEG_STATE_AN_ENABLE;
1759                 }
1760                 break;
1761
1762         case ANEG_STATE_COMPLETE_ACK_INIT:
1763                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1764                         ret = ANEG_FAILED;
1765                         break;
1766                 }
1767                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1768                                MR_LP_ADV_HALF_DUPLEX |
1769                                MR_LP_ADV_SYM_PAUSE |
1770                                MR_LP_ADV_ASYM_PAUSE |
1771                                MR_LP_ADV_REMOTE_FAULT1 |
1772                                MR_LP_ADV_REMOTE_FAULT2 |
1773                                MR_LP_ADV_NEXT_PAGE |
1774                                MR_TOGGLE_RX |
1775                                MR_NP_RX);
1776                 if (ap->rxconfig & ANEG_CFG_FD)
1777                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1778                 if (ap->rxconfig & ANEG_CFG_HD)
1779                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1780                 if (ap->rxconfig & ANEG_CFG_PS1)
1781                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1782                 if (ap->rxconfig & ANEG_CFG_PS2)
1783                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1784                 if (ap->rxconfig & ANEG_CFG_RF1)
1785                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1786                 if (ap->rxconfig & ANEG_CFG_RF2)
1787                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1788                 if (ap->rxconfig & ANEG_CFG_NP)
1789                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1790
1791                 ap->link_time = ap->cur_time;
1792
1793                 ap->flags ^= (MR_TOGGLE_TX);
1794                 if (ap->rxconfig & 0x0008)
1795                         ap->flags |= MR_TOGGLE_RX;
1796                 if (ap->rxconfig & ANEG_CFG_NP)
1797                         ap->flags |= MR_NP_RX;
1798                 ap->flags |= MR_PAGE_RX;
1799
1800                 ap->state = ANEG_STATE_COMPLETE_ACK;
1801                 ret = ANEG_TIMER_ENAB;
1802                 break;
1803
1804         case ANEG_STATE_COMPLETE_ACK:
1805                 if (ap->ability_match != 0 &&
1806                     ap->rxconfig == 0) {
1807                         ap->state = ANEG_STATE_AN_ENABLE;
1808                         break;
1809                 }
1810                 delta = ap->cur_time - ap->link_time;
1811                 if (delta > ANEG_STATE_SETTLE_TIME) {
1812                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1813                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1814                         } else {
1815                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1816                                     !(ap->flags & MR_NP_RX)) {
1817                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1818                                 } else {
1819                                         ret = ANEG_FAILED;
1820                                 }
1821                         }
1822                 }
1823                 break;
1824
1825         case ANEG_STATE_IDLE_DETECT_INIT:
1826                 ap->link_time = ap->cur_time;
1827                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1828                 tw32_f(MAC_MODE, tp->mac_mode);
1829                 udelay(40);
1830
1831                 ap->state = ANEG_STATE_IDLE_DETECT;
1832                 ret = ANEG_TIMER_ENAB;
1833                 break;
1834
1835         case ANEG_STATE_IDLE_DETECT:
1836                 if (ap->ability_match != 0 &&
1837                     ap->rxconfig == 0) {
1838                         ap->state = ANEG_STATE_AN_ENABLE;
1839                         break;
1840                 }
1841                 delta = ap->cur_time - ap->link_time;
1842                 if (delta > ANEG_STATE_SETTLE_TIME) {
1843                         /* XXX another gem from the Broadcom driver :( */
1844                         ap->state = ANEG_STATE_LINK_OK;
1845                 }
1846                 break;
1847
1848         case ANEG_STATE_LINK_OK:
1849                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1850                 ret = ANEG_DONE;
1851                 break;
1852
1853         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1854                 /* ??? unimplemented */
1855                 break;
1856
1857         case ANEG_STATE_NEXT_PAGE_WAIT:
1858                 /* ??? unimplemented */
1859                 break;
1860
1861         default:
1862                 ret = ANEG_FAILED;
1863                 break;
1864         }
1865
1866         return ret;
1867 }
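/* The caller polls the state machine above until it returns ANEG_DONE or
 * ANEG_FAILED; ANEG_OK and ANEG_TIMER_ENAB simply mean "keep ticking".
 * See the bounded while() loop in tg3_setup_fiber_phy() below.
 */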
1868
1869 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
1870 {
1871         u32 orig_pause_cfg;
1872         u16 orig_active_speed;
1873         u8 orig_active_duplex;
1874         int current_link_up;
1875         int i;
1876
1877         orig_pause_cfg =
1878                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1879                                   TG3_FLAG_TX_PAUSE));
1880         orig_active_speed = tp->link_config.active_speed;
1881         orig_active_duplex = tp->link_config.active_duplex;
1882
1883         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
1884         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1885         tw32_f(MAC_MODE, tp->mac_mode);
1886         udelay(40);
1887
1888         /* Reset when initializing for the first time, or when we have a link. */
1889         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
1890             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1891                 /* Set PLL lock range. */
1892                 tg3_writephy(tp, 0x16, 0x8007);
1893
1894                 /* SW reset */
1895                 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
1896
1897                 /* Wait for reset to complete. */
1898                 /* XXX schedule_timeout() ... */
1899                 for (i = 0; i < 500; i++)
1900                         udelay(10);
1901
1902                 /* Config mode; select PMA/Ch 1 regs. */
1903                 tg3_writephy(tp, 0x10, 0x8411);
1904
1905                 /* Enable auto-lock and comdet, select txclk for tx. */
1906                 tg3_writephy(tp, 0x11, 0x0a10);
1907
1908                 tg3_writephy(tp, 0x18, 0x00a0);
1909                 tg3_writephy(tp, 0x16, 0x41ff);
1910
1911                 /* Assert and deassert POR. */
1912                 tg3_writephy(tp, 0x13, 0x0400);
1913                 udelay(40);
1914                 tg3_writephy(tp, 0x13, 0x0000);
1915
1916                 tg3_writephy(tp, 0x11, 0x0a50);
1917                 udelay(40);
1918                 tg3_writephy(tp, 0x11, 0x0a10);
1919
1920                 /* Wait for signal to stabilize */
1921                 /* XXX schedule_timeout() ... */
1922                 for (i = 0; i < 15000; i++)
1923                         udelay(10);
1924
1925                 /* Deselect the channel register so we can read the PHYID
1926                  * later.
1927                  */
1928                 tg3_writephy(tp, 0x10, 0x8011);
1929         }
1930
1931         /* Enable the link change interrupt unless we are polling the serdes. */
1932         if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
1933                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1934         else
1935                 tw32_f(MAC_EVENT, 0);
1936         udelay(40);
1937
1938         current_link_up = 0;
1939         if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
1940                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1941                     !(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
1942                         struct tg3_fiber_aneginfo aninfo;
1943                         int status = ANEG_FAILED;
1944                         unsigned int tick;
1945                         u32 tmp;
1946
1947                         memset(&aninfo, 0, sizeof(aninfo));
1948                         aninfo.flags |= (MR_AN_ENABLE);
1949
1950                         tw32(MAC_TX_AUTO_NEG, 0);
1951
1952                         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1953                         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1954                         udelay(40);
1955
1956                         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1957                         udelay(40);
1958
1959                         aninfo.state = ANEG_STATE_UNKNOWN;
1960                         aninfo.cur_time = 0;
1961                         tick = 0;
1962                         while (++tick < 195000) {
1963                                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
1964                                 if (status == ANEG_DONE ||
1965                                     status == ANEG_FAILED)
1966                                         break;
1967
1968                                 udelay(1);
1969                         }
1970
1971                         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1972                         tw32_f(MAC_MODE, tp->mac_mode);
1973                         udelay(40);
1974
1975                         if (status == ANEG_DONE &&
1976                             (aninfo.flags &
1977                              (MR_AN_COMPLETE | MR_LINK_OK |
1978                               MR_LP_ADV_FULL_DUPLEX))) {
1979                                 u32 local_adv, remote_adv;
1980
1981                                 local_adv = ADVERTISE_PAUSE_CAP;
1982                                 remote_adv = 0;
1983                                 if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
1984                                         remote_adv |= LPA_PAUSE_CAP;
1985                                 if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
1986                                         remote_adv |= LPA_PAUSE_ASYM;
1987
1988                                 tg3_setup_flow_control(tp, local_adv, remote_adv);
1989
1990                                 tp->tg3_flags |=
1991                                         TG3_FLAG_GOT_SERDES_FLOWCTL;
1992                                 current_link_up = 1;
1993                         }
1994                         for (i = 0; i < 60; i++) {
1995                                 udelay(20);
1996                                 tw32_f(MAC_STATUS,
1997                                      (MAC_STATUS_SYNC_CHANGED |
1998                                       MAC_STATUS_CFG_CHANGED));
1999                                 udelay(40);
2000                                 if ((tr32(MAC_STATUS) &
2001                                      (MAC_STATUS_SYNC_CHANGED |
2002                                       MAC_STATUS_CFG_CHANGED)) == 0)
2003                                         break;
2004                         }
2005                         if (current_link_up == 0 &&
2006                             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
2007                                 current_link_up = 1;
2008                         }
2009                 } else {
2010                         /* Forcing 1000FD link up. */
2011                         current_link_up = 1;
2012                 }
2013         }
2014
2015         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2016         tw32_f(MAC_MODE, tp->mac_mode);
2017         udelay(40);
2018
2019         tp->hw_status->status =
2020                 (SD_STATUS_UPDATED |
2021                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2022
2023         for (i = 0; i < 100; i++) {
2024                 udelay(20);
2025                 tw32_f(MAC_STATUS,
2026                      (MAC_STATUS_SYNC_CHANGED |
2027                       MAC_STATUS_CFG_CHANGED));
2028                 udelay(40);
2029                 if ((tr32(MAC_STATUS) &
2030                      (MAC_STATUS_SYNC_CHANGED |
2031                       MAC_STATUS_CFG_CHANGED)) == 0)
2032                         break;
2033         }
2034
2035         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
2036                 current_link_up = 0;
2037
2038         if (current_link_up == 1) {
2039                 tp->link_config.active_speed = SPEED_1000;
2040                 tp->link_config.active_duplex = DUPLEX_FULL;
2041         } else {
2042                 tp->link_config.active_speed = SPEED_INVALID;
2043                 tp->link_config.active_duplex = DUPLEX_INVALID;
2044         }
2045
2046         if (current_link_up != netif_carrier_ok(tp->dev)) {
2047                 if (current_link_up)
2048                         netif_carrier_on(tp->dev);
2049                 else
2050                         netif_carrier_off(tp->dev);
2051                 tg3_link_report(tp);
2052         } else {
2053                 u32 now_pause_cfg =
2054                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2055                                          TG3_FLAG_TX_PAUSE);
2056                 if (orig_pause_cfg != now_pause_cfg ||
2057                     orig_active_speed != tp->link_config.active_speed ||
2058                     orig_active_duplex != tp->link_config.active_duplex)
2059                         tg3_link_report(tp);
2060         }
2061
2062         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
2063                 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
2064                 udelay(40);
2065                 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
2066                         tw32_f(MAC_MODE, tp->mac_mode);
2067                         udelay(40);
2068                 }
2069         }
2070
2071         return 0;
2072 }
2073
2074 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2075 {
2076         int err;
2077
2078         if (tp->phy_id == PHY_ID_SERDES) {
2079                 err = tg3_setup_fiber_phy(tp, force_reset);
2080         } else {
2081                 err = tg3_setup_copper_phy(tp, force_reset);
2082         }
2083
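        /* 1000Mb/s half duplex gets a much larger slot time below,
         * presumably to cover the extended 802.3z half-duplex slot time
         * (carrier extension); all other speed/duplex combinations use
         * the standard value.
         */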
2084         if (tp->link_config.active_speed == SPEED_1000 &&
2085             tp->link_config.active_duplex == DUPLEX_HALF)
2086                 tw32(MAC_TX_LENGTHS,
2087                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2088                       (6 << TX_LENGTHS_IPG_SHIFT) |
2089                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2090         else
2091                 tw32(MAC_TX_LENGTHS,
2092                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2093                       (6 << TX_LENGTHS_IPG_SHIFT) |
2094                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2095
2096         if (netif_carrier_ok(tp->dev)) {
2097                 tw32(HOSTCC_STAT_COAL_TICKS,
2098                      DEFAULT_STAT_COAL_TICKS);
2099         } else {
2100                 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2101         }
2102
2103         return err;
2104 }
2105
2106 /* Tigon3 never reports partial packet sends.  So we do not
2107  * need special logic to handle SKBs that have not had all
2108  * of their frags sent yet, like SunGEM does.
2109  */
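/* tg3_tx() walks the software consumer index (tp->tx_cons) forward via
 * NEXT_TX() until it catches up with the hardware consumer index from the
 * status block, unmapping the head buffer and each fragment as it goes,
 * then wakes the queue once enough descriptors are free again.
 */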
2110 static void tg3_tx(struct tg3 *tp)
2111 {
2112         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2113         u32 sw_idx = tp->tx_cons;
2114
2115         while (sw_idx != hw_idx) {
2116                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2117                 struct sk_buff *skb = ri->skb;
2118                 int i;
2119
2120                 if (unlikely(skb == NULL))
2121                         BUG();
2122
2123                 pci_unmap_single(tp->pdev,
2124                                  pci_unmap_addr(ri, mapping),
2125                                  skb_headlen(skb),
2126                                  PCI_DMA_TODEVICE);
2127
2128                 ri->skb = NULL;
2129
2130                 sw_idx = NEXT_TX(sw_idx);
2131
2132                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2133                         if (unlikely(sw_idx == hw_idx))
2134                                 BUG();
2135
2136                         ri = &tp->tx_buffers[sw_idx];
2137                         if (unlikely(ri->skb != NULL))
2138                                 BUG();
2139
2140                         pci_unmap_page(tp->pdev,
2141                                        pci_unmap_addr(ri, mapping),
2142                                        skb_shinfo(skb)->frags[i].size,
2143                                        PCI_DMA_TODEVICE);
2144
2145                         sw_idx = NEXT_TX(sw_idx);
2146                 }
2147
2148                 dev_kfree_skb_irq(skb);
2149         }
2150
2151         tp->tx_cons = sw_idx;
2152
2153         if (netif_queue_stopped(tp->dev) &&
2154             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2155                 netif_wake_queue(tp->dev);
2156 }
2157
2158 /* Returns size of skb allocated or < 0 on error.
2159  *
2160  * We only need to fill in the address because the other members
2161  * of the RX descriptor are invariant, see tg3_init_rings.
2162  *
2163  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2164  * posting buffers we only dirty the first cache line of the RX
2165  * descriptor (containing the address).  Whereas for the RX status
2166  * buffers the cpu only reads the last cacheline of the RX descriptor
2167  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2168  */
2169 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2170                             int src_idx, u32 dest_idx_unmasked)
2171 {
2172         struct tg3_rx_buffer_desc *desc;
2173         struct ring_info *map, *src_map;
2174         struct sk_buff *skb;
2175         dma_addr_t mapping;
2176         int skb_size, dest_idx;
2177
2178         src_map = NULL;
2179         switch (opaque_key) {
2180         case RXD_OPAQUE_RING_STD:
2181                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2182                 desc = &tp->rx_std[dest_idx];
2183                 map = &tp->rx_std_buffers[dest_idx];
2184                 if (src_idx >= 0)
2185                         src_map = &tp->rx_std_buffers[src_idx];
2186                 skb_size = RX_PKT_BUF_SZ;
2187                 break;
2188
2189         case RXD_OPAQUE_RING_JUMBO:
2190                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2191                 desc = &tp->rx_jumbo[dest_idx];
2192                 map = &tp->rx_jumbo_buffers[dest_idx];
2193                 if (src_idx >= 0)
2194                         src_map = &tp->rx_jumbo_buffers[src_idx];
2195                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2196                 break;
2197
2198         default:
2199                 return -EINVAL;
2200         }
2201
2202         /* Do not overwrite any of the map or rp information
2203          * until we are sure we can commit to a new buffer.
2204          *
2205          * Callers depend upon this behavior and assume that
2206          * we leave everything unchanged if we fail.
2207          */
2208         skb = dev_alloc_skb(skb_size);
2209         if (skb == NULL)
2210                 return -ENOMEM;
2211
2212         skb->dev = tp->dev;
2213         skb_reserve(skb, tp->rx_offset);
2214
2215         mapping = pci_map_single(tp->pdev, skb->data,
2216                                  skb_size - tp->rx_offset,
2217                                  PCI_DMA_FROMDEVICE);
2218
2219         map->skb = skb;
2220         pci_unmap_addr_set(map, mapping, mapping);
2221
2222         if (src_map != NULL)
2223                 src_map->skb = NULL;
2224
2225         desc->addr_hi = ((u64)mapping >> 32);
2226         desc->addr_lo = ((u64)mapping & 0xffffffff);
2227
2228         return skb_size;
2229 }
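/* Note: the positive return value above is the allocated buffer size; the
 * actual DMA mapping covers skb_size - tp->rx_offset bytes, and tg3_rx()
 * uses the same arithmetic when it unmaps the buffer.
 */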
2230
2231 /* We only need to copy over the address because the other
2232  * members of the RX descriptor are invariant.  See notes above
2233  * tg3_alloc_rx_skb for full details.
2234  */
2235 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2236                            int src_idx, u32 dest_idx_unmasked)
2237 {
2238         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2239         struct ring_info *src_map, *dest_map;
2240         int dest_idx;
2241
2242         switch (opaque_key) {
2243         case RXD_OPAQUE_RING_STD:
2244                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2245                 dest_desc = &tp->rx_std[dest_idx];
2246                 dest_map = &tp->rx_std_buffers[dest_idx];
2247                 src_desc = &tp->rx_std[src_idx];
2248                 src_map = &tp->rx_std_buffers[src_idx];
2249                 break;
2250
2251         case RXD_OPAQUE_RING_JUMBO:
2252                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2253                 dest_desc = &tp->rx_jumbo[dest_idx];
2254                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2255                 src_desc = &tp->rx_jumbo[src_idx];
2256                 src_map = &tp->rx_jumbo_buffers[src_idx];
2257                 break;
2258
2259         default:
2260                 return;
2261         }
2262
2263         dest_map->skb = src_map->skb;
2264         pci_unmap_addr_set(dest_map, mapping,
2265                            pci_unmap_addr(src_map, mapping));
2266         dest_desc->addr_hi = src_desc->addr_hi;
2267         dest_desc->addr_lo = src_desc->addr_lo;
2268
2269         src_map->skb = NULL;
2270 }
2271
2272 #if TG3_VLAN_TAG_USED
2273 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2274 {
2275         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2276 }
2277 #endif
2278
2279 /* The RX ring scheme is composed of multiple rings which post fresh
2280  * buffers to the chip, and one special ring the chip uses to report
2281  * status back to the host.
2282  *
2283  * The special ring reports the status of received packets to the
2284  * host.  The chip does not write into the original descriptor the
2285  * RX buffer was obtained from.  The chip simply takes the original
2286  * descriptor as provided by the host, updates the status and length
2287  * field, then writes this into the next status ring entry.
2288  *
2289  * Each ring the host uses to post buffers to the chip is described
2290  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2291  * it is first placed into the on-chip ram.  When the packet's length
2292  * is known, it walks down the TG3_BDINFO entries to select the ring.
2293  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2294  * which is within the range of the new packet's length is chosen.
2295  *
2296  * The "separate ring for rx status" scheme may sound queer, but it makes
2297  * sense from a cache coherency perspective.  If only the host writes
2298  * to the buffer post rings, and only the chip writes to the rx status
2299  * rings, then cache lines never move beyond shared-modified state.
2300  * If both the host and chip were to write into the same ring, cache line
2301  * eviction could occur since both entities want it in an exclusive state.
2302  */
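/* Rough data flow for the code below:
 *
 *   host --(rx_std / rx_jumbo producer rings)--> chip
 *   chip --(rx_rcb return ring + status block)--> host
 *
 * tg3_rx() consumes rx_rcb entries, recycles or replaces the buffers, and
 * re-posts them by bumping the producer mailboxes at the end.
 */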
2303 static int tg3_rx(struct tg3 *tp, int budget)
2304 {
2305         u32 work_mask;
2306         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2307         u16 hw_idx, sw_idx;
2308         int received;
2309
2310         hw_idx = tp->hw_status->idx[0].rx_producer;
2311         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2312         work_mask = 0;
2313         received = 0;
2314         while (sw_idx != hw_idx && budget > 0) {
2315                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2316                 unsigned int len;
2317                 struct sk_buff *skb;
2318                 dma_addr_t dma_addr;
2319                 u32 opaque_key, desc_idx, *post_ptr;
2320
2321                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2322                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2323                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2324                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2325                                                   mapping);
2326                         skb = tp->rx_std_buffers[desc_idx].skb;
2327                         post_ptr = &tp->rx_std_ptr;
2328                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2329                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2330                                                   mapping);
2331                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2332                         post_ptr = &tp->rx_jumbo_ptr;
2333                 } else {
2335                         goto next_pkt_nopost;
2336                 }
2337
2338                 work_mask |= opaque_key;
2339
2340                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2341                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2342                 drop_it:
2343                         tg3_recycle_rx(tp, opaque_key,
2344                                        desc_idx, *post_ptr);
2345                 drop_it_no_recycle:
2346                         /* Other statistics kept track of by card. */
2347                         tp->net_stats.rx_dropped++;
2348                         goto next_pkt;
2349                 }
2350
2351                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2352
2353                 if (len > RX_COPY_THRESHOLD) {
2354                         int skb_size;
2355
2356                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2357                                                     desc_idx, *post_ptr);
2358                         if (skb_size < 0)
2359                                 goto drop_it;
2360
2361                         pci_unmap_single(tp->pdev, dma_addr,
2362                                          skb_size - tp->rx_offset,
2363                                          PCI_DMA_FROMDEVICE);
2364
2365                         skb_put(skb, len);
2366                 } else {
2367                         struct sk_buff *copy_skb;
2368
2369                         tg3_recycle_rx(tp, opaque_key,
2370                                        desc_idx, *post_ptr);
2371
2372                         copy_skb = dev_alloc_skb(len + 2);
2373                         if (copy_skb == NULL)
2374                                 goto drop_it_no_recycle;
2375
2376                         copy_skb->dev = tp->dev;
2377                         skb_reserve(copy_skb, 2);
2378                         skb_put(copy_skb, len);
2379                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2380                         memcpy(copy_skb->data, skb->data, len);
2381                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2382
2383                         /* We'll reuse the original ring buffer. */
2384                         skb = copy_skb;
2385                 }
2386
2387                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2388                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2389                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2390                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2391                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2392                 else
2393                         skb->ip_summed = CHECKSUM_NONE;
2394
2395                 skb->protocol = eth_type_trans(skb, tp->dev);
2396 #if TG3_VLAN_TAG_USED
2397                 if (tp->vlgrp != NULL &&
2398                     desc->type_flags & RXD_FLAG_VLAN) {
2399                         tg3_vlan_rx(tp, skb,
2400                                     desc->err_vlan & RXD_VLAN_MASK);
2401                 } else
2402 #endif
2403                         netif_receive_skb(skb);
2404
2405                 tp->dev->last_rx = jiffies;
2406                 received++;
2407                 budget--;
2408
2409 next_pkt:
2410                 (*post_ptr)++;
2411 next_pkt_nopost:
2412                 rx_rcb_ptr++;
2413                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2414         }
2415
2416         /* ACK the status ring. */
2417         tp->rx_rcb_ptr = rx_rcb_ptr;
2418         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2419                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2420
2421         /* Refill RX ring(s). */
2422         if (work_mask & RXD_OPAQUE_RING_STD) {
2423                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2424                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2425                              sw_idx);
2426         }
2427         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2428                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2429                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2430                              sw_idx);
2431         }
2432
2433         return received;
2434 }
2435
2436 static int tg3_poll(struct net_device *netdev, int *budget)
2437 {
2438         struct tg3 *tp = netdev->priv;
2439         struct tg3_hw_status *sblk = tp->hw_status;
2440         unsigned long flags;
2441         int done;
2442
2443         spin_lock_irqsave(&tp->lock, flags);
2444
2445         /* handle link change and other phy events */
2446         if (!(tp->tg3_flags &
2447               (TG3_FLAG_USE_LINKCHG_REG |
2448                TG3_FLAG_POLL_SERDES))) {
2449                 if (sblk->status & SD_STATUS_LINK_CHG) {
2450                         sblk->status = SD_STATUS_UPDATED |
2451                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2452                         tg3_setup_phy(tp, 0);
2453                 }
2454         }
2455
2456         /* run TX completion thread */
2457         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2458                 spin_lock(&tp->tx_lock);
2459                 tg3_tx(tp);
2460                 spin_unlock(&tp->tx_lock);
2461         }
2462
2463         spin_unlock_irqrestore(&tp->lock, flags);
2464
2465         /* run RX thread, within the bounds set by NAPI.
2466          * All RX "locking" is done by ensuring outside
2467          * code synchronizes with dev->poll()
2468          */
2469         done = 1;
2470         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2471                 int orig_budget = *budget;
2472                 int work_done;
2473
2474                 if (orig_budget > netdev->quota)
2475                         orig_budget = netdev->quota;
2476
2477                 work_done = tg3_rx(tp, orig_budget);
2478
2479                 *budget -= work_done;
2480                 netdev->quota -= work_done;
2481
2482                 if (work_done >= orig_budget)
2483                         done = 0;
2484         }
2485
2486         /* if no more work, tell net stack and NIC we're done */
2487         if (done) {
2488                 spin_lock_irqsave(&tp->lock, flags);
2489                 __netif_rx_complete(netdev);
2490                 tg3_enable_ints(tp);
2491                 spin_unlock_irqrestore(&tp->lock, flags);
2492         }
2493
2494         return (done ? 0 : 1);
2495 }
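/* NAPI contract for tg3_poll() above: return 0 once all work fits within
 * the budget (poll complete, chip interrupts re-enabled), or return 1 to
 * stay on the poll list and be called again.
 */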
2496
2497 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2498 {
2499         struct tg3_hw_status *sblk = tp->hw_status;
2500         unsigned int work_exists = 0;
2501
2502         /* check for phy events */
2503         if (!(tp->tg3_flags &
2504               (TG3_FLAG_USE_LINKCHG_REG |
2505                TG3_FLAG_POLL_SERDES))) {
2506                 if (sblk->status & SD_STATUS_LINK_CHG)
2507                         work_exists = 1;
2508         }
2509         /* check for RX/TX work to do */
2510         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2511             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2512                 work_exists = 1;
2513
2514         return work_exists;
2515 }
2516
2517 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2518 {
2519         struct net_device *dev = dev_id;
2520         struct tg3 *tp = dev->priv;
2521         struct tg3_hw_status *sblk = tp->hw_status;
2522         unsigned long flags;
2523         unsigned int handled = 1;
2524
2525         spin_lock_irqsave(&tp->lock, flags);
2526
2527         if (sblk->status & SD_STATUS_UPDATED) {
2528                 /*
2529                  * writing any value to intr-mbox-0 clears PCI INTA# and
2530                  * chip-internal interrupt pending events.
2531                  * writing non-zero to intr-mbox-0 additionally tells the
2532                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2533                  * event coalescing.
2534                  */
2535                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2536                              0x00000001);
2537                 /*
2538                  * Flush PCI write.  This also guarantees that our
2539                  * status block has been flushed to host memory.
2540                  */
2541                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2542                 sblk->status &= ~SD_STATUS_UPDATED;
2543
2544                 if (likely(tg3_has_work(dev, tp)))
2545                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2546                 else {
2547                         /* no work, shared interrupt perhaps?  re-enable
2548                          * interrupts, and flush that PCI write
2549                          */
2550                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2551                                 0x00000000);
2552                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2553                 }
2554         } else {        /* shared interrupt */
2555                 handled = 0;
2556         }
2557
2558         spin_unlock_irqrestore(&tp->lock, flags);
2559
2560         return IRQ_RETVAL(handled);
2561 }
2562
2563 static int tg3_init_hw(struct tg3 *);
2564 static int tg3_halt(struct tg3 *);
2565
2566 #ifdef CONFIG_NET_POLL_CONTROLLER
2567 static void tg3_poll_controller(struct net_device *dev)
2568 {
2569         tg3_interrupt(dev->irq, dev, NULL);
2570 }
2571 #endif
2572
2573 static void tg3_reset_task(void *_data)
2574 {
2575         struct tg3 *tp = _data;
2576         unsigned int restart_timer;
2577
2578         tg3_netif_stop(tp);
2579
2580         spin_lock_irq(&tp->lock);
2581         spin_lock(&tp->tx_lock);
2582
2583         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2584         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2585
2586         tg3_halt(tp);
2587         tg3_init_hw(tp);
2588
2589         spin_unlock(&tp->tx_lock);
2590         spin_unlock_irq(&tp->lock);
2591
2592         tg3_netif_start(tp);
2593
2594         if (restart_timer)
2595                 mod_timer(&tp->timer, jiffies + 1);
2596 }
2597
2598 static void tg3_tx_timeout(struct net_device *dev)
2599 {
2600         struct tg3 *tp = dev->priv;
2601
2602         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2603                dev->name);
2604
2605         schedule_work(&tp->reset_task);
2606 }
2607
2608 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2609
2610 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2611                                        u32 guilty_entry, int guilty_len,
2612                                        u32 last_plus_one, u32 *start, u32 mss)
2613 {
2614         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2615         dma_addr_t new_addr;
2616         u32 entry = *start;
2617         int i;
2618
2619         if (!new_skb) {
2620                 dev_kfree_skb(skb);
2621                 return -1;
2622         }
2623
2624         /* New SKB is guaranteed to be linear. */
2625         entry = *start;
2626         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2627                                   PCI_DMA_TODEVICE);
2628         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2629                     (skb->ip_summed == CHECKSUM_HW) ?
2630                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2631         *start = NEXT_TX(entry);
2632
2633         /* Now clean up the sw ring entries. */
2634         i = 0;
2635         while (entry != last_plus_one) {
2636                 int len;
2637
2638                 if (i == 0)
2639                         len = skb_headlen(skb);
2640                 else
2641                         len = skb_shinfo(skb)->frags[i-1].size;
2642                 pci_unmap_single(tp->pdev,
2643                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2644                                  len, PCI_DMA_TODEVICE);
2645                 if (i == 0) {
2646                         tp->tx_buffers[entry].skb = new_skb;
2647                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2648                 } else {
2649                         tp->tx_buffers[entry].skb = NULL;
2650                 }
2651                 entry = NEXT_TX(entry);
2652         }
2653
2654         dev_kfree_skb(skb);
2655
2656         return 0;
2657 }
2658
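/* Write one TX descriptor.  Depending on TG3_FLAG_HOST_TXDS the descriptor
 * either lives in host memory (tp->tx_ring) or is written directly into
 * NIC SRAM through the register window; in the latter case the previous
 * vlan tag is cached so a redundant PIO write can be skipped.
 */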
2659 static void tg3_set_txd(struct tg3 *tp, int entry,
2660                         dma_addr_t mapping, int len, u32 flags,
2661                         u32 mss_and_is_end)
2662 {
2663         int is_end = (mss_and_is_end & 0x1);
2664         u32 mss = (mss_and_is_end >> 1);
2665         u32 vlan_tag = 0;
2666
2667         if (is_end)
2668                 flags |= TXD_FLAG_END;
2669         if (flags & TXD_FLAG_VLAN) {
2670                 vlan_tag = flags >> 16;
2671                 flags &= 0xffff;
2672         }
2673         vlan_tag |= (mss << TXD_MSS_SHIFT);
2674         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2675                 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2676
2677                 txd->addr_hi = ((u64) mapping >> 32);
2678                 txd->addr_lo = ((u64) mapping & 0xffffffff);
2679                 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2680                 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2681         } else {
2682                 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2683                 unsigned long txd;
2684
2685                 txd = (tp->regs +
2686                        NIC_SRAM_WIN_BASE +
2687                        NIC_SRAM_TX_BUFFER_DESC);
2688                 txd += (entry * TXD_SIZE);
2689
2690                 /* Save some PIOs */
2691                 if (sizeof(dma_addr_t) != sizeof(u32))
2692                         writel(((u64) mapping >> 32),
2693                                txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2694
2695                 writel(((u64) mapping & 0xffffffff),
2696                        txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2697                 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2698                 if (txr->prev_vlan_tag != vlan_tag) {
2699                         writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2700                         txr->prev_vlan_tag = vlan_tag;
2701                 }
2702         }
2703 }
2704
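/* Returns nonzero when a DMA mapping would wrap across the 4GB boundary,
 * which some Tigon3 revisions cannot handle.  Worked example: with the
 * high 32 bits zero, base = 0xffffff00 and len = 0x200 give
 * base + len + 8 = 0x100000108, which truncates to 0x108 < base, so the
 * hwbug path re-linearizes the skb via skb_copy() instead.
 */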
2705 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2706 {
2707         u32 base = (u32) mapping & 0xffffffff;
2708
2709         return ((base > 0xffffdcc0) &&
2710                 ((u64) mapping >> 32) == 0 &&
2711                 (base + len + 8 < base));
2712 }
2713
2714 static int tg3_start_xmit_4gbug(struct sk_buff *skb, struct net_device *dev)
2715 {
2716         struct tg3 *tp = dev->priv;
2717         dma_addr_t mapping;
2718         unsigned int i;
2719         u32 len, entry, base_flags, mss;
2720         int would_hit_hwbug;
2721         unsigned long flags;
2722
2723         len = skb_headlen(skb);
2724
2725         /* No BH disabling for tx_lock here.  We are running in BH disabled
2726          * context and TX reclaim runs via tp->poll inside of a software
2727          * interrupt.  Rejoice!
2728          *
2729          * Actually, things are not so simple.  If we are to take a hw
2730          * IRQ here, we can deadlock, consider:
2731          *
2732          *       CPU1           CPU2
2733          *   tg3_start_xmit
2734          *   take tp->tx_lock
2735          *                      tg3_timer
2736          *                      take tp->lock
2737          *   tg3_interrupt
2738          *   spin on tp->lock
2739          *                      spin on tp->tx_lock
2740          *
2741          * So we really do need to disable interrupts when taking
2742          * tx_lock here.
2743          */
2744         spin_lock_irqsave(&tp->tx_lock, flags);
2745
2746         /* This is a hard error, log it. */
2747         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2748                 netif_stop_queue(dev);
2749                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2750                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2751                        dev->name);
2752                 return 1;
2753         }
2754
2755         entry = tp->tx_prod;
2756         base_flags = 0;
2757         if (skb->ip_summed == CHECKSUM_HW)
2758                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2759 #if TG3_TSO_SUPPORT != 0
2760         mss = 0;
2761         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2762             (mss = skb_shinfo(skb)->tso_size) != 0) {
2763                 int tcp_opt_len, ip_tcp_len;
2764
2765                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2766                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2767
2768                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2769                                TXD_FLAG_CPU_POST_DMA);
2770
2771                 skb->nh.iph->check = 0;
2772                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
2773                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2774                                                       skb->nh.iph->daddr,
2775                                                       0, IPPROTO_TCP, 0);
2776
2777                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2778                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2779                                 int tsflags;
2780
2781                                 tsflags = ((skb->nh.iph->ihl - 5) +
2782                                            (tcp_opt_len >> 2));
2783                                 mss |= (tsflags << 11);
2784                         }
2785                 } else {
2786                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2787                                 int tsflags;
2788
2789                                 tsflags = ((skb->nh.iph->ihl - 5) +
2790                                            (tcp_opt_len >> 2));
2791                                 base_flags |= tsflags << 12;
2792                         }
2793                 }
2794         }
2795 #else
2796         mss = 0;
2797 #endif
2798 #if TG3_VLAN_TAG_USED
2799         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2800                 base_flags |= (TXD_FLAG_VLAN |
2801                                (vlan_tx_tag_get(skb) << 16));
2802 #endif
2803
2804         /* Queue skb data, a.k.a. the main skb fragment. */
2805         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2806
2807         tp->tx_buffers[entry].skb = skb;
2808         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2809
2810         would_hit_hwbug = 0;
2811
2812         if (tg3_4g_overflow_test(mapping, len))
2813                 would_hit_hwbug = entry + 1;
2814
2815         tg3_set_txd(tp, entry, mapping, len, base_flags,
2816                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2817
2818         entry = NEXT_TX(entry);
2819
2820         /* Now loop through additional data fragments, and queue them. */
2821         if (skb_shinfo(skb)->nr_frags > 0) {
2822                 unsigned int i, last;
2823
2824                 last = skb_shinfo(skb)->nr_frags - 1;
2825                 for (i = 0; i <= last; i++) {
2826                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2827
2828                         len = frag->size;
2829                         mapping = pci_map_page(tp->pdev,
2830                                                frag->page,
2831                                                frag->page_offset,
2832                                                len, PCI_DMA_TODEVICE);
2833
2834                         tp->tx_buffers[entry].skb = NULL;
2835                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2836
2837                         if (tg3_4g_overflow_test(mapping, len)) {
2838                                 /* Only one should match. */
2839                                 if (would_hit_hwbug)
2840                                         BUG();
2841                                 would_hit_hwbug = entry + 1;
2842                         }
2843
2844                         tg3_set_txd(tp, entry, mapping, len,
2845                                     base_flags, (i == last));
2846
2847                         entry = NEXT_TX(entry);
2848                 }
2849         }
2850
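        /* would_hit_hwbug holds (index + 1) of the descriptor that
         * tripped tg3_4g_overflow_test().  Rewind to the first
         * descriptor of this skb, walk forward to the offender, and
         * hand the range to tigon3_4gb_hwbug_workaround(), which is
         * expected to re-queue the data from a safe mapping.
         */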
2851         if (would_hit_hwbug) {
2852                 u32 last_plus_one = entry;
2853                 u32 start;
2854                 unsigned int len = 0;
2855
2856                 would_hit_hwbug -= 1;
2857                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
2858                 entry &= (TG3_TX_RING_SIZE - 1);
2859                 start = entry;
2860                 i = 0;
2861                 while (entry != last_plus_one) {
2862                         if (i == 0)
2863                                 len = skb_headlen(skb);
2864                         else
2865                                 len = skb_shinfo(skb)->frags[i-1].size;
2866
2867                         if (entry == would_hit_hwbug)
2868                                 break;
2869
2870                         i++;
2871                         entry = NEXT_TX(entry);
2872
2873                 }
2874
2875                 /* If the workaround fails due to memory/mapping
2876                  * failure, silently drop this packet.
2877                  */
2878                 if (tigon3_4gb_hwbug_workaround(tp, skb,
2879                                                 entry, len,
2880                                                 last_plus_one,
2881                                                 &start, mss))
2882                         goto out_unlock;
2883
2884                 entry = start;
2885         }
2886
2887         /* Packets are ready, update Tx producer idx local and on card. */
2888         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2889                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2890                               TG3_64BIT_REG_LOW), entry);
2891         } else {
2892                 /* First, make sure tg3 sees last descriptor fully
2893                  * in SRAM.
2894                  */
2895                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2896                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
2897
2898                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2899                               TG3_64BIT_REG_LOW), entry);
2900         }
2901
2902         tp->tx_prod = entry;
2903         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
2904                 netif_stop_queue(dev);
2905
2906 out_unlock:
2907         spin_unlock_irqrestore(&tp->tx_lock, flags);
2908
2909         dev->trans_start = jiffies;
2910
2911         return 0;
2912 }
2913
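/* Fast-path transmit routine: identical to tg3_start_xmit_4gbug()
 * minus the 4GB-boundary checks, for chips that do not need that
 * workaround (see the producer-index comment below).  Presumably the
 * appropriate handler is selected when the device is set up.
 */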
2914 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2915 {
2916         struct tg3 *tp = dev->priv;
2917         dma_addr_t mapping;
2918         u32 len, entry, base_flags, mss;
2919         unsigned long flags;
2920
2921         len = skb_headlen(skb);
2922
2923         /* No BH disabling for tx_lock here.  We are running in BH disabled
2924          * context and TX reclaim runs via tp->poll inside of a software
2925          * interrupt.  Rejoice!
2926          *
2927          * Actually, things are not so simple.  If we are to take a hw
2928          * IRQ here, we can deadlock; consider:
2929          *
2930          *       CPU1           CPU2
2931          *   tg3_start_xmit
2932          *   take tp->tx_lock
2933          *                      tg3_timer
2934          *                      take tp->lock
2935          *   tg3_interrupt
2936          *   spin on tp->lock
2937          *                      spin on tp->tx_lock
2938          *
2939          * So we really do need to disable interrupts when taking
2940          * tx_lock here.
2941          */
2942         spin_lock_irqsave(&tp->tx_lock, flags);
2943
2944         /* This is a hard error, log it. */
2945         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2946                 netif_stop_queue(dev);
2947                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2948                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2949                        dev->name);
2950                 return 1;
2951         }
2952
2953         entry = tp->tx_prod;
2954         base_flags = 0;
2955         if (skb->ip_summed == CHECKSUM_HW)
2956                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2957 #if TG3_TSO_SUPPORT != 0
2958         mss = 0;
2959         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2960             (mss = skb_shinfo(skb)->tso_size) != 0) {
2961                 int tcp_opt_len, ip_tcp_len;
2962
2963                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2964                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2965
2966                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2967                                TXD_FLAG_CPU_POST_DMA);
2968
2969                 skb->nh.iph->check = 0;
2970                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
2971                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2972                                                       skb->nh.iph->daddr,
2973                                                       0, IPPROTO_TCP, 0);
2974
2975                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2976                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2977                                 int tsflags;
2978
2979                                 tsflags = ((skb->nh.iph->ihl - 5) +
2980                                            (tcp_opt_len >> 2));
2981                                 mss |= (tsflags << 11);
2982                         }
2983                 } else {
2984                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2985                                 int tsflags;
2986
2987                                 tsflags = ((skb->nh.iph->ihl - 5) +
2988                                            (tcp_opt_len >> 2));
2989                                 base_flags |= tsflags << 12;
2990                         }
2991                 }
2992         }
2993 #else
2994         mss = 0;
2995 #endif
2996 #if TG3_VLAN_TAG_USED
2997         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2998                 base_flags |= (TXD_FLAG_VLAN |
2999                                (vlan_tx_tag_get(skb) << 16));
3000 #endif
3001
3002         /* Queue skb data, a.k.a. the main skb fragment. */
3003         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3004
3005         tp->tx_buffers[entry].skb = skb;
3006         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3007
3008         tg3_set_txd(tp, entry, mapping, len, base_flags,
3009                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3010
3011         entry = NEXT_TX(entry);
3012
3013         /* Now loop through additional data fragments, and queue them. */
3014         if (skb_shinfo(skb)->nr_frags > 0) {
3015                 unsigned int i, last;
3016
3017                 last = skb_shinfo(skb)->nr_frags - 1;
3018                 for (i = 0; i <= last; i++) {
3019                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3020
3021
3022                         len = frag->size;
3023                         mapping = pci_map_page(tp->pdev,
3024                                                frag->page,
3025                                                frag->page_offset,
3026                                                len, PCI_DMA_TODEVICE);
3027
3028                         tp->tx_buffers[entry].skb = NULL;
3029                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3030
3031                         tg3_set_txd(tp, entry, mapping, len,
3032                                     base_flags, (i == last));
3033
3034                         entry = NEXT_TX(entry);
3035                 }
3036         }
3037
3038         /* Packets are ready, update Tx producer idx local and on card.
3039          * We know this is not a 5700 (by virtue of not being a chip
3040          * requiring the 4GB overflow workaround) so we can safely omit
3041          * the double-write bug tests.
3042          */
3043         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3044                 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
3045                               TG3_64BIT_REG_LOW), entry);
3046         } else {
3047                 /* First, make sure tg3 sees last descriptor fully
3048                  * in SRAM.
3049                  */
3050                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
3051                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
3052                              TG3_64BIT_REG_LOW);
3053
3054                 tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
3055                               TG3_64BIT_REG_LOW), entry);
3056         }
3057
3058         tp->tx_prod = entry;
3059         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3060                 netif_stop_queue(dev);
3061
3062         spin_unlock_irqrestore(&tp->tx_lock, flags);
3063
3064         dev->trans_start = jiffies;
3065
3066         return 0;
3067 }
3068
3069 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3070                                int new_mtu)
3071 {
3072         dev->mtu = new_mtu;
3073
3074         if (new_mtu > ETH_DATA_LEN)
3075                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3076         else
3077                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3078 }
3079
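/* Validate and apply a new MTU.  If the interface is down, the new
 * value is only recorded; otherwise the chip is halted and
 * re-initialized so the jumbo-frame setup matches the new MTU.
 */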
3080 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3081 {
3082         struct tg3 *tp = dev->priv;
3083
3084         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3085                 return -EINVAL;
3086
3087         if (!netif_running(dev)) {
3088                 /* We'll just catch it later when the
3089                  * device is up'd.
3090                  */
3091                 tg3_set_mtu(dev, tp, new_mtu);
3092                 return 0;
3093         }
3094
3095         tg3_netif_stop(tp);
3096         spin_lock_irq(&tp->lock);
3097         spin_lock(&tp->tx_lock);
3098
3099         tg3_halt(tp);
3100
3101         tg3_set_mtu(dev, tp, new_mtu);
3102
3103         tg3_init_hw(tp);
3104
3105         spin_unlock(&tp->tx_lock);
3106         spin_unlock_irq(&tp->lock);
3107         tg3_netif_start(tp);
3108
3109         return 0;
3110 }
3111
3112 /* Free up pending packets in all rx/tx rings.
3113  *
3114  * The chip has been shut down and the driver detached from
3115  * the networking stack, so no interrupts or new tx packets will
3116  * end up in the driver.  tp->{tx,}lock is not held and we are not
3117  * in an interrupt context and thus may sleep.
3118  */
3119 static void tg3_free_rings(struct tg3 *tp)
3120 {
3121         struct ring_info *rxp;
3122         int i;
3123
3124         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3125                 rxp = &tp->rx_std_buffers[i];
3126
3127                 if (rxp->skb == NULL)
3128                         continue;
3129                 pci_unmap_single(tp->pdev,
3130                                  pci_unmap_addr(rxp, mapping),
3131                                  RX_PKT_BUF_SZ - tp->rx_offset,
3132                                  PCI_DMA_FROMDEVICE);
3133                 dev_kfree_skb_any(rxp->skb);
3134                 rxp->skb = NULL;
3135         }
3136
3137         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3138                 rxp = &tp->rx_jumbo_buffers[i];
3139
3140                 if (rxp->skb == NULL)
3141                         continue;
3142                 pci_unmap_single(tp->pdev,
3143                                  pci_unmap_addr(rxp, mapping),
3144                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3145                                  PCI_DMA_FROMDEVICE);
3146                 dev_kfree_skb_any(rxp->skb);
3147                 rxp->skb = NULL;
3148         }
3149
3150         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3151                 struct tx_ring_info *txp;
3152                 struct sk_buff *skb;
3153                 int j;
3154
3155                 txp = &tp->tx_buffers[i];
3156                 skb = txp->skb;
3157
3158                 if (skb == NULL) {
3159                         i++;
3160                         continue;
3161                 }
3162
3163                 pci_unmap_single(tp->pdev,
3164                                  pci_unmap_addr(txp, mapping),
3165                                  skb_headlen(skb),
3166                                  PCI_DMA_TODEVICE);
3167                 txp->skb = NULL;
3168
3169                 i++;
3170
3171                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3172                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3173                         pci_unmap_page(tp->pdev,
3174                                        pci_unmap_addr(txp, mapping),
3175                                        skb_shinfo(skb)->frags[j].size,
3176                                        PCI_DMA_TODEVICE);
3177                         i++;
3178                 }
3179
3180                 dev_kfree_skb_any(skb);
3181         }
3182 }
3183
3184 /* Initialize tx/rx rings for packet processing.
3185  *
3186  * The chip has been shut down and the driver detached from
3187  * the networking stack, so no interrupts or new tx packets will
3188  * end up in the driver.  tp->{tx,}lock are held and thus
3189  * we may not sleep.
3190  */
3191 static void tg3_init_rings(struct tg3 *tp)
3192 {
3193         unsigned long start, end;
3194         u32 i;
3195
3196         /* Free up all the SKBs. */
3197         tg3_free_rings(tp);
3198
3199         /* Zero out all descriptors. */
3200         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3201         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3202         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3203
3204         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3205                 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3206         } else {
3207                 start = (tp->regs +
3208                          NIC_SRAM_WIN_BASE +
3209                          NIC_SRAM_TX_BUFFER_DESC);
3210                 end = start + TG3_TX_RING_BYTES;
3211                 while (start < end) {
3212                         writel(0, start);
3213                         start += 4;
3214                 }
3215                 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3216                         tp->tx_buffers[i].prev_vlan_tag = 0;
3217         }
3218
3219         /* Initialize invariants of the rings; we only set this
3220          * stuff once.  This works because the card does not
3221          * write into the rx buffer posting rings.
3222          */
3223         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3224                 struct tg3_rx_buffer_desc *rxd;
3225
3226                 rxd = &tp->rx_std[i];
3227                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3228                         << RXD_LEN_SHIFT;
3229                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3230                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3231                                (i << RXD_OPAQUE_INDEX_SHIFT));
3232         }
3233
3234         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3235                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3236                         struct tg3_rx_buffer_desc *rxd;
3237
3238                         rxd = &tp->rx_jumbo[i];
3239                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3240                                 << RXD_LEN_SHIFT;
3241                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3242                                 RXD_FLAG_JUMBO;
3243                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3244                                (i << RXD_OPAQUE_INDEX_SHIFT));
3245                 }
3246         }
3247
3248         /* Now allocate fresh SKBs for each rx ring. */
3249         for (i = 0; i < tp->rx_pending; i++) {
3250                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3251                                      -1, i) < 0)
3252                         break;
3253         }
3254
3255         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3256                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3257                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3258                                              -1, i) < 0)
3259                                 break;
3260                 }
3261         }
3262 }
3263
3264 /*
3265  * Must not be invoked with interrupt sources disabled and
3266  * the hardware shut down.
3267  */
3268 static void tg3_free_consistent(struct tg3 *tp)
3269 {
3270         if (tp->rx_std_buffers) {
3271                 kfree(tp->rx_std_buffers);
3272                 tp->rx_std_buffers = NULL;
3273         }
3274         if (tp->rx_std) {
3275                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3276                                     tp->rx_std, tp->rx_std_mapping);
3277                 tp->rx_std = NULL;
3278         }
3279         if (tp->rx_jumbo) {
3280                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3281                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3282                 tp->rx_jumbo = NULL;
3283         }
3284         if (tp->rx_rcb) {
3285                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3286                                     tp->rx_rcb, tp->rx_rcb_mapping);
3287                 tp->rx_rcb = NULL;
3288         }
3289         if (tp->tx_ring) {
3290                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3291                         tp->tx_ring, tp->tx_desc_mapping);
3292                 tp->tx_ring = NULL;
3293         }
3294         if (tp->hw_status) {
3295                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3296                                     tp->hw_status, tp->status_mapping);
3297                 tp->hw_status = NULL;
3298         }
3299         if (tp->hw_stats) {
3300                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3301                                     tp->hw_stats, tp->stats_mapping);
3302                 tp->hw_stats = NULL;
3303         }
3304 }
3305
3306 /*
3307  * Must not be invoked with interrupt sources disabled and
3308  * the hardware shut down.  Can sleep.
3309  */
3310 static int tg3_alloc_consistent(struct tg3 *tp)
3311 {
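        /* One kmalloc covers three arrays laid out back to back:
         * standard RX ring_info, jumbo RX ring_info, then the TX
         * tx_ring_info entries.  The jumbo and TX pointers set up
         * below are simply offsets into this single block.
         */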
3312         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3313                                       (TG3_RX_RING_SIZE +
3314                                        TG3_RX_JUMBO_RING_SIZE)) +
3315                                      (sizeof(struct tx_ring_info) *
3316                                       TG3_TX_RING_SIZE),
3317                                      GFP_KERNEL);
3318         if (!tp->rx_std_buffers)
3319                 return -ENOMEM;
3320
3321         memset(tp->rx_std_buffers, 0,
3322                (sizeof(struct ring_info) *
3323                 (TG3_RX_RING_SIZE +
3324                  TG3_RX_JUMBO_RING_SIZE)) +
3325                (sizeof(struct tx_ring_info) *
3326                 TG3_TX_RING_SIZE));
3327
3328         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3329         tp->tx_buffers = (struct tx_ring_info *)
3330                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3331
3332         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3333                                           &tp->rx_std_mapping);
3334         if (!tp->rx_std)
3335                 goto err_out;
3336
3337         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3338                                             &tp->rx_jumbo_mapping);
3339
3340         if (!tp->rx_jumbo)
3341                 goto err_out;
3342
3343         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3344                                           &tp->rx_rcb_mapping);
3345         if (!tp->rx_rcb)
3346                 goto err_out;
3347
3348         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3349                 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3350                                                    &tp->tx_desc_mapping);
3351                 if (!tp->tx_ring)
3352                         goto err_out;
3353         } else {
3354                 tp->tx_ring = NULL;
3355                 tp->tx_desc_mapping = 0;
3356         }
3357
3358         tp->hw_status = pci_alloc_consistent(tp->pdev,
3359                                              TG3_HW_STATUS_SIZE,
3360                                              &tp->status_mapping);
3361         if (!tp->hw_status)
3362                 goto err_out;
3363
3364         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3365                                             sizeof(struct tg3_hw_stats),
3366                                             &tp->stats_mapping);
3367         if (!tp->hw_stats)
3368                 goto err_out;
3369
3370         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3371         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3372
3373         return 0;
3374
3375 err_out:
3376         tg3_free_consistent(tp);
3377         return -ENOMEM;
3378 }
3379
3380 #define MAX_WAIT_CNT 1000
3381
3382 /* To stop a block, clear the enable bit and poll till it
3383  * clears.  tp->lock is held.
3384  */
3385 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3386 {
3387         unsigned int i;
3388         u32 val;
3389
3390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3391                 switch (ofs) {
3392                 case RCVLSC_MODE:
3393                 case DMAC_MODE:
3394                 case MBFREE_MODE:
3395                 case BUFMGR_MODE:
3396                 case MEMARB_MODE:
3397                         /* We can't enable/disable these bits of the
3398                          * 5705, just say success.
3399                          */
3400                         return 0;
3401
3402                 default:
3403                         break;
3404                 }
3405         }
3406
3407         val = tr32(ofs);
3408         val &= ~enable_bit;
3409         tw32_f(ofs, val);
3410
3411         for (i = 0; i < MAX_WAIT_CNT; i++) {
3412                 udelay(100);
3413                 val = tr32(ofs);
3414                 if ((val & enable_bit) == 0)
3415                         break;
3416         }
3417
3418         if (i == MAX_WAIT_CNT) {
3419                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3420                        "ofs=%lx enable_bit=%x\n",
3421                        ofs, enable_bit);
3422                 return -ENODEV;
3423         }
3424
3425         return 0;
3426 }
3427
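/* Quiesce the chip: disable interrupts, stop the receive and transmit
 * descriptor/DMA engines block by block, disable the MAC TX path,
 * reset the FTQ, and clear the status and statistics blocks.
 */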
3428 /* tp->lock is held. */
3429 static int tg3_abort_hw(struct tg3 *tp)
3430 {
3431         int i, err;
3432
3433         tg3_disable_ints(tp);
3434
3435         tp->rx_mode &= ~RX_MODE_ENABLE;
3436         tw32_f(MAC_RX_MODE, tp->rx_mode);
3437         udelay(10);
3438
3439         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3440         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3441         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3442         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3443         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3444         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3445
3446         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3447         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3448         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3449         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3450         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3451         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3452         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3453         if (err)
3454                 goto out;
3455
3456         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3457         tw32_f(MAC_MODE, tp->mac_mode);
3458         udelay(40);
3459
3460         tp->tx_mode &= ~TX_MODE_ENABLE;
3461         tw32_f(MAC_TX_MODE, tp->tx_mode);
3462
3463         for (i = 0; i < MAX_WAIT_CNT; i++) {
3464                 udelay(100);
3465                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3466                         break;
3467         }
3468         if (i >= MAX_WAIT_CNT) {
3469                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3470                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3471                        tp->dev->name, tr32(MAC_TX_MODE));
3472                 return -ENODEV;
3473         }
3474
3475         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3476         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3477         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3478
3479         tw32(FTQ_RESET, 0xffffffff);
3480         tw32(FTQ_RESET, 0x00000000);
3481
3482         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3483         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3484         if (err)
3485                 goto out;
3486
3487         if (tp->hw_status)
3488                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3489         if (tp->hw_stats)
3490                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3491
3492 out:
3493         return err;
3494 }
3495
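/* Full core-clock reset via GRC_MISC_CFG.  Afterwards PCI state is
 * restored, indirect register access is re-enabled, the firmware
 * initialization handshake in NIC_SRAM_FIRMWARE_MBOX is awaited, and
 * the ASF-enable flag is re-probed from NIC SRAM.
 */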
3496 /* tp->lock is held. */
3497 static int tg3_chip_reset(struct tg3 *tp)
3498 {
3499         u32 val;
3500         u32 flags_save;
3501         int i;
3502
3503         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3504                 /* Force NVRAM to settle.
3505                  * This deals with a chip bug which can result in EEPROM
3506                  * corruption.
3507                  */
3508                 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3509                         int i;
3510
3511                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3512                         for (i = 0; i < 100000; i++) {
3513                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3514                                         break;
3515                                 udelay(10);
3516                         }
3517                 }
3518         }
3519
3520         /*
3521          * We must avoid the readl() that normally takes place.
3522          * It locks machines, causes machine checks, and other
3523          * fun things.  So, temporarily disable the 5701
3524          * hardware workaround, while we do the reset.
3525          */
3526         flags_save = tp->tg3_flags;
3527         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3528
3529         /* do the reset */
3530         val = GRC_MISC_CFG_CORECLK_RESET;
3531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3532                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3533         tw32(GRC_MISC_CFG, val);
3534
3535         /* restore 5701 hardware bug workaround flag */
3536         tp->tg3_flags = flags_save;
3537
3538         /* Flush PCI posted writes.  The normal MMIO registers
3539          * are inaccessible at this time so this is the only
3540          * way to do this reliably.  I tried to use indirect
3541          * register read/write but this upset some 5701 variants.
3542          */
3543         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3544
3545         udelay(40);
3546         udelay(40);
3547         udelay(40);
3548
3549         /* Re-enable indirect register accesses. */
3550         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3551                                tp->misc_host_ctrl);
3552
3553         /* Set MAX PCI retry to zero. */
3554         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3555         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3556             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3557                 val |= PCISTATE_RETRY_SAME_DMA;
3558         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3559
3560         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3561
3562         /* Make sure PCI-X relaxed ordering bit is clear. */
3563         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3564         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3565         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3566
3567         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3568
3569         tw32(GRC_MODE, tp->grc_mode);
3570
3571         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3573                 tp->pci_clock_ctrl |=
3574                         (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE);
3575                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3576         }
3577
3578         /* Prevent PXE from restarting.  */
3579         tg3_write_mem(tp,
3580                       NIC_SRAM_FIRMWARE_MBOX,
3581                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3582
3583         if (tp->phy_id == PHY_ID_SERDES) {
3584                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3585                 tw32_f(MAC_MODE, tp->mac_mode);
3586         } else
3587                 tw32_f(MAC_MODE, 0);
3588         udelay(40);
3589
3590         /* Wait for firmware initialization to complete. */
3591         for (i = 0; i < 100000; i++) {
3592                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3593                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3594                         break;
3595                 udelay(10);
3596         }
3597         if (i >= 100000 &&
3598             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3599                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3600                        "firmware will not restart magic=%08x\n",
3601                        tp->dev->name, val);
3602                 return -ENODEV;
3603         }
3604
3605         /* Reprobe ASF enable state.  */
3606         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3607         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3608         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3609                 u32 nic_cfg;
3610
3611                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3612                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
3613                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3614         }
3615
3616         return 0;
3617 }
3618
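/* Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the
 * command mailbox, raise the RX CPU event (bit 14), and briefly poll
 * for the firmware to acknowledge by clearing that bit.
 */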
3619 /* tp->lock is held. */
3620 static void tg3_stop_fw(struct tg3 *tp)
3621 {
3622         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3623                 u32 val;
3624                 int i;
3625
3626                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3627                 val = tr32(GRC_RX_CPU_EVENT);
3628                 val |= (1 << 14);
3629                 tw32(GRC_RX_CPU_EVENT, val);
3630
3631                 /* Wait for RX cpu to ACK the event.  */
3632                 for (i = 0; i < 100; i++) {
3633                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3634                                 break;
3635                         udelay(1);
3636                 }
3637         }
3638 }
3639
3640 /* tp->lock is held. */
3641 static int tg3_halt(struct tg3 *tp)
3642 {
3643         int err;
3644
3645         tg3_stop_fw(tp);
3646         tg3_abort_hw(tp);
3647         err = tg3_chip_reset(tp);
3648         if (err)
3649                 return err;
3650
3651         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
3652                 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3653                               DRV_STATE_UNLOAD);
3654
3655         return 0;
3656 }
3657
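/* Layout of the 5701 A0 firmware fix image loaded below: base
 * addresses and lengths of its text, rodata, data, sbss and bss
 * sections in the on-chip CPU address space.
 */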
3658 #define TG3_FW_RELEASE_MAJOR    0x0
3659 #define TG3_FW_RELEASE_MINOR    0x0
3660 #define TG3_FW_RELEASE_FIX      0x0
3661 #define TG3_FW_START_ADDR       0x08000000
3662 #define TG3_FW_TEXT_ADDR        0x08000000
3663 #define TG3_FW_TEXT_LEN         0x9c0
3664 #define TG3_FW_RODATA_ADDR      0x080009c0
3665 #define TG3_FW_RODATA_LEN       0x60
3666 #define TG3_FW_DATA_ADDR        0x08000a40
3667 #define TG3_FW_DATA_LEN         0x20
3668 #define TG3_FW_SBSS_ADDR        0x08000a60
3669 #define TG3_FW_SBSS_LEN         0xc
3670 #define TG3_FW_BSS_ADDR         0x08000a70
3671 #define TG3_FW_BSS_LEN          0x10
3672
3673 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3674         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3675         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3676         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3677         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3678         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3679         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3680         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3681         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3682         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3683         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3684         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3685         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3686         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3687         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3688         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3689         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3690         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3691         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3692         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3693         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3694         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3695         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3696         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3697         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3698         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3699         0, 0, 0, 0, 0, 0,
3700         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3701         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3702         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3703         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3704         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3705         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3706         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3707         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3708         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3709         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3710         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3711         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3712         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3713         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3714         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3715         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3716         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3717         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3718         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3719         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3720         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3721         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3722         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3723         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3724         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3725         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3726         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3727         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3728         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3729         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3730         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3731         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3732         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3733         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3734         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3735         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3736         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3737         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3738         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3739         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3740         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3741         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3742         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3743         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3744         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3745         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3746         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3747         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3748         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3749         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3750         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3751         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3752         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3753         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3754         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3755         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3756         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3757         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3758         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3759         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3760         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3761         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3762         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
3763         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
3764         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
3765 };
3766
3767 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
3768         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
3769         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
3770         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
3771         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
3772         0x00000000
3773 };
3774
3775 #if 0 /* All zeros, don't eat up space with it. */
3776 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
3777         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3778         0x00000000, 0x00000000, 0x00000000, 0x00000000
3779 };
3780 #endif
3781
3782 #define RX_CPU_SCRATCH_BASE     0x30000
3783 #define RX_CPU_SCRATCH_SIZE     0x04000
3784 #define TX_CPU_SCRATCH_BASE     0x34000
3785 #define TX_CPU_SCRATCH_SIZE     0x04000
3786
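/* Halt the on-chip RX or TX CPU by asserting CPU_MODE_HALT and
 * polling until it takes effect.  The RX CPU additionally gets a
 * final forced halt plus a short delay; requesting a TX CPU halt on
 * a 5705 is treated as a driver bug.
 */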
3787 /* tp->lock is held. */
3788 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3789 {
3790         int i;
3791
3792         if (offset == TX_CPU_BASE &&
3793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3794                 BUG();
3795
3796         if (offset == RX_CPU_BASE) {
3797                 for (i = 0; i < 10000; i++) {
3798                         tw32(offset + CPU_STATE, 0xffffffff);
3799                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3800                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3801                                 break;
3802                 }
3803
3804                 tw32(offset + CPU_STATE, 0xffffffff);
3805                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3806                 udelay(10);
3807         } else {
3808                 for (i = 0; i < 10000; i++) {
3809                         tw32(offset + CPU_STATE, 0xffffffff);
3810                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3811                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3812                                 break;
3813                 }
3814         }
3815
3816         if (i >= 10000) {
3817                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
3818                        "and %s CPU\n",
3819                        tp->dev->name,
3820                        (offset == RX_CPU_BASE ? "RX" : "TX"));
3821                 return -ENODEV;
3822         }
3823         return 0;
3824 }
3825
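/* Describes one firmware image: base address, length, and payload for
 * each of its text, rodata, and data sections.  A NULL payload pointer
 * means the section is all zeros.
 */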
3826 struct fw_info {
3827         unsigned int text_base;
3828         unsigned int text_len;
3829         u32 *text_data;
3830         unsigned int rodata_base;
3831         unsigned int rodata_len;
3832         u32 *rodata_data;
3833         unsigned int data_base;
3834         unsigned int data_len;
3835         u32 *data_data;
3836 };
3837
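/* Download a firmware image into a CPU's scratch memory: halt the
 * CPU, zero the scratch area, then copy the text, rodata, and data
 * sections in with either memory writes or indirect register writes,
 * depending on the ASIC revision.  PCI config space access is forced
 * for the duration via TG3_FLAG_PCIX_TARGET_HWBUG.
 */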
3838 /* tp->lock is held. */
3839 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
3840                                  int cpu_scratch_size, struct fw_info *info)
3841 {
3842         int err, i;
3843         u32 orig_tg3_flags = tp->tg3_flags;
3844         void (*write_op)(struct tg3 *, u32, u32);
3845
3846         if (cpu_base == TX_CPU_BASE &&
3847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3848                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
3849                        "TX cpu firmware on %s which is 5705.\n",
3850                        tp->dev->name);
3851                 return -EINVAL;
3852         }
3853
3854         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3855                 write_op = tg3_write_mem;
3856         else
3857                 write_op = tg3_write_indirect_reg32;
3858
3859         /* Force use of PCI config space for indirect register
3860          * write calls.
3861          */
3862         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
3863
3864         err = tg3_halt_cpu(tp, cpu_base);
3865         if (err)
3866                 goto out;
3867
3868         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3869                 write_op(tp, cpu_scratch_base + i, 0);
3870         tw32(cpu_base + CPU_STATE, 0xffffffff);
3871         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3872         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
3873                 write_op(tp, (cpu_scratch_base +
3874                               (info->text_base & 0xffff) +
3875                               (i * sizeof(u32))),
3876                          (info->text_data ?
3877                           info->text_data[i] : 0));
3878         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
3879                 write_op(tp, (cpu_scratch_base +
3880                               (info->rodata_base & 0xffff) +
3881                               (i * sizeof(u32))),
3882                          (info->rodata_data ?
3883                           info->rodata_data[i] : 0));
3884         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
3885                 write_op(tp, (cpu_scratch_base +
3886                               (info->data_base & 0xffff) +
3887                               (i * sizeof(u32))),
3888                          (info->data_data ?
3889                           info->data_data[i] : 0));
3890
3891         err = 0;
3892
3893 out:
3894         tp->tg3_flags = orig_tg3_flags;
3895         return err;
3896 }
3897
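/* Load the firmware fix above into both the RX and TX CPU scratch
 * areas, then start only the RX CPU and verify that its PC actually
 * landed on TG3_FW_TEXT_ADDR.
 */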
3898 /* tp->lock is held. */
3899 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3900 {
3901         struct fw_info info;
3902         int err, i;
3903
3904         info.text_base = TG3_FW_TEXT_ADDR;
3905         info.text_len = TG3_FW_TEXT_LEN;
3906         info.text_data = &tg3FwText[0];
3907         info.rodata_base = TG3_FW_RODATA_ADDR;
3908         info.rodata_len = TG3_FW_RODATA_LEN;
3909         info.rodata_data = &tg3FwRodata[0];
3910         info.data_base = TG3_FW_DATA_ADDR;
3911         info.data_len = TG3_FW_DATA_LEN;
3912         info.data_data = NULL;
3913
3914         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3915                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3916                                     &info);
3917         if (err)
3918                 return err;
3919
3920         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3921                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3922                                     &info);
3923         if (err)
3924                 return err;
3925
3926         /* Now startup only the RX cpu. */
3927         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3928         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
3929
3930         for (i = 0; i < 5; i++) {
3931                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
3932                         break;
3933                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3934                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3935                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
3936                 udelay(1000);
3937         }
3938         if (i >= 5) {
3939                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
3940                        "to set RX CPU PC, is %08x should be %08x\n",
3941                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
3942                        TG3_FW_TEXT_ADDR);
3943                 return -ENODEV;
3944         }
3945         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3946         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3947
3948         return 0;
3949 }
3950
3951 #if TG3_TSO_SUPPORT != 0
3952
3953 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
3954 #define TG3_TSO_FW_RELEASE_MINOR        0x4
3955 #define TG3_TSO_FW_RELEASE_FIX          0x0
3956 #define TG3_TSO_FW_START_ADDR           0x08000000
3957 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
3958 #define TG3_TSO_FW_TEXT_LEN             0x1a90
3959 #define TG3_TSO_FW_RODATA_ADDR          0x08001a90
3960 #define TG3_TSO_FW_RODATA_LEN           0x60
3961 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
3962 #define TG3_TSO_FW_DATA_LEN             0x20
3963 #define TG3_TSO_FW_SBSS_ADDR            0x08001b40
3964 #define TG3_TSO_FW_SBSS_LEN             0x2c
3965 #define TG3_TSO_FW_BSS_ADDR             0x08001b70
3966 #define TG3_TSO_FW_BSS_LEN              0x894
3967
3968 static u32 tg3TsoFwText[] = {
3969         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3970         0x37bd4000, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000010, 0x00000000,
3971         0x0000000d, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0x3c04fefe,
3972         0xafbf0018, 0x0e0005d4, 0x34840002, 0x0e000664, 0x00000000, 0x3c030800,
3973         0x90631b58, 0x24020002, 0x3c040800, 0x24841a9c, 0x14620003, 0x24050001,
3974         0x3c040800, 0x24841a90, 0x24060003, 0x00003821, 0xafa00010, 0x0e000678,
3975         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
3976         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
3977         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
3978         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
3979         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
3980         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001ef, 0x24040001,
3981         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
3982         0x90421b88, 0x14520003, 0x00000000, 0x0e0004bf, 0x00000000, 0x0a00003c,
3983         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
3984         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ab0, 0x00002821, 0x00003021,
3985         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000678, 0xafa00014, 0x3c040800,
3986         0x248423c8, 0xa4800000, 0x3c010800, 0xa0201b88, 0x3c010800, 0xac201b8c,
3987         0x3c010800, 0xac201b90, 0x3c010800, 0xac201b94, 0x3c010800, 0xac201b9c,
3988         0x3c010800, 0xac201ba8, 0x3c010800, 0xac201bac, 0x8f624434, 0x3c010800,
3989         0xac221b78, 0x8f624438, 0x3c010800, 0xac221b7c, 0x8f624410, 0xac80f7a8,
3990         0x3c010800, 0xac201b74, 0x3c010800, 0xac2023d0, 0x3c010800, 0xac2023b8,
3991         0x3c010800, 0xac2023bc, 0x3c010800, 0xac2023f0, 0x3c010800, 0xac221b80,
3992         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
3993         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac2023fc,
3994         0xac820034, 0x3c040800, 0x24841abc, 0x3c050800, 0x8ca523fc, 0x00003021,
3995         0x00003821, 0xafa00010, 0x0e000678, 0xafa00014, 0x8fbf0018, 0x03e00008,
3996         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac8, 0x00002821, 0x00003021,
3997         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000678, 0xafa00014, 0x0e00005b,
3998         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
3999         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4000         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4001         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4002         0x24631bac, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b88,
4003         0x14400118, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4004         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4005         0x3c030800, 0x90631b88, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4006         0x3c010800, 0xa0221b88, 0x00051100, 0x00821025, 0x3c010800, 0xac201b8c,
4007         0x3c010800, 0xac201b90, 0x3c010800, 0xac201b94, 0x3c010800, 0xac201b9c,
4008         0x3c010800, 0xac201ba8, 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4,
4009         0x3c010800, 0xa42223c8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222400,
4010         0x30428000, 0x3c010800, 0xa4231bb6, 0x10400005, 0x24020001, 0x3c010800,
4011         0xac2223e4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023e4,
4012         0x9622000a, 0x3c030800, 0x94631bb6, 0x3c010800, 0xac2023e0, 0x3c010800,
4013         0xac2023e8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4014         0xa42223c0, 0x3c010800, 0x0a000115, 0xa4231b86, 0x9622000c, 0x3c010800,
4015         0xa42223dc, 0x3c040800, 0x24841b8c, 0x8c820000, 0x00021100, 0x3c010800,
4016         0x00220821, 0xac311bb8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4017         0xac271bbc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4018         0x00220821, 0xac261bc0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4019         0xac291bc4, 0x96230008, 0x3c020800, 0x8c421b9c, 0x00432821, 0x3c010800,
4020         0xac251b9c, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4021         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4022         0x8c421b30, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b30, 0x2c620002,
4023         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4024         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b70,
4025         0x3c040800, 0x94841b84, 0x01221025, 0x3c010800, 0xa42223ca, 0x24020001,
4026         0x3c010800, 0xac221ba8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4027         0xac231b70, 0x3c010800, 0xa4251b84, 0x3c060800, 0x24c61b8c, 0x8cc20000,
4028         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000652,
4029         0x24040002, 0x0a0001e5, 0x00000000, 0x3c020800, 0x8c421ba8, 0x10400077,
4030         0x24020001, 0x3c050800, 0x90a51b88, 0x14a20071, 0x00000000, 0x3c150800,
4031         0x96b51b86, 0x3c040800, 0x8c841b9c, 0x32a3ffff, 0x0083102a, 0x1440006b,
4032         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523e0, 0x1060005b,
4033         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4034         0x3c110800, 0x02308821, 0x0e000621, 0x8e311bb8, 0x00402821, 0x10a00053,
4035         0x00000000, 0x9628000a, 0x31020040, 0x10400004, 0x2407180c, 0x8e22000c,
4036         0x2407188c, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bc0, 0x3c020800,
4037         0x00501021, 0x8c421bc4, 0x00031d00, 0x00021400, 0x00621825, 0xaca30014,
4038         0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, 0x00431021,
4039         0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000, 0x30c4ffff,
4040         0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, 0x8e63fff4,
4041         0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021, 0xae62fff4,
4042         0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, 0xae60fff4,
4043         0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008, 0x24020305,
4044         0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, 0x0a0001ca,
4045         0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223e0, 0x10400003, 0x3c024b65,
4046         0x0a0001d2, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, 0x30e2ffff,
4047         0xaca20010, 0x0e00059f, 0x00a02021, 0x3242ffff, 0x0054102b, 0x1440ffaa,
4048         0x00000000, 0x24020002, 0x3c010800, 0x0a0001e5, 0xa0221b88, 0x8ec2083c,
4049         0x24420001, 0x0a0001e5, 0xaec2083c, 0x0e0004bf, 0x00000000, 0x8fbf002c,
4050         0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
4051         0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028, 0xafb30024,
4052         0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, 0x3442fff8,
4053         0x3c060800, 0x24c61ba4, 0x02428824, 0x9623000e, 0x8cc20000, 0x00431021,
4054         0xacc20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821, 0x0e000637,
4055         0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4056         0x10400121, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x1040011c,
4057         0x00000000, 0x0a00020c, 0x00000000, 0x8e240008, 0x8e230014, 0x00041402,
4058         0x000241c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, 0x00031942,
4059         0x30637800, 0x00021100, 0x24424000, 0x00625021, 0x9542000a, 0x3084ffff,
4060         0x30420008, 0x104000b3, 0x000429c0, 0x3c020800, 0x8c4223f0, 0x1440002d,
4061         0x25050008, 0x95020014, 0x3c010800, 0xa42223c0, 0x8d070010, 0x00071402,
4062         0x3c010800, 0xa42223c2, 0x3c010800, 0xa42723c4, 0x9502000e, 0x30e3ffff,
4063         0x00431023, 0x3c010800, 0xac2223f8, 0x8f626800, 0x3c030010, 0x00431024,
4064         0x10400005, 0x00000000, 0x9503001a, 0x9502001c, 0x0a000241, 0x00431021,
4065         0x9502001a, 0x3c010800, 0xac2223ec, 0x3c02c000, 0x02421825, 0x3c010800,
4066         0xac2823f0, 0x3c010800, 0xac3223f4, 0xaf635c9c, 0x8f625c90, 0x30420002,
4067         0x104000df, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000da,
4068         0x00000000, 0x0a00024e, 0x00000000, 0x9502000e, 0x3c030800, 0x946323c4,
4069         0x00434823, 0x3123ffff, 0x2c620008, 0x1040001c, 0x00000000, 0x95020014,
4070         0x24420028, 0x00a22821, 0x00031042, 0x1840000b, 0x00002021, 0x24c60848,
4071         0x00403821, 0x94a30000, 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000,
4072         0x0087102a, 0x1440fff9, 0x24a50002, 0x31220001, 0x1040001f, 0x3c024000,
4073         0x3c040800, 0x248423ec, 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021,
4074         0x0a00028d, 0xac820000, 0x8f626800, 0x3c030010, 0x00431024, 0x10400009,
4075         0x00000000, 0x9502001a, 0x3c030800, 0x8c6323ec, 0x00431021, 0x3c010800,
4076         0xac2223ec, 0x0a00028e, 0x3c024000, 0x9502001a, 0x9504001c, 0x3c030800,
4077         0x8c6323ec, 0x00441023, 0x00621821, 0x3c010800, 0xac2323ec, 0x3c024000,
4078         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000,
4079         0x9542000a, 0x30420010, 0x10400095, 0x00000000, 0x3c060800, 0x24c623f0,
4080         0x3c020800, 0x944223c4, 0x8cc50000, 0x3c040800, 0x8c8423f8, 0x24420030,
4081         0x00a22821, 0x94a20004, 0x3c030800, 0x8c6323ec, 0x00441023, 0x00621821,
4082         0x00603821, 0x00032402, 0x30e2ffff, 0x00823821, 0x00071402, 0x00e23821,
4083         0x00071027, 0x3c010800, 0xac2323ec, 0xa4a20006, 0x3c030800, 0x8c6323f4,
4084         0x3c0200ff, 0x3442fff8, 0x00628824, 0x96220008, 0x24040001, 0x24034000,
4085         0x000241c0, 0x00e01021, 0xa502001a, 0xa500001c, 0xacc00000, 0x3c010800,
4086         0xac241b50, 0xaf635cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4087         0x3c010800, 0xac201b50, 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002,
4088         0x10400003, 0x00000000, 0x3c010800, 0xac201b50, 0x3c020800, 0x8c421b50,
4089         0x1040ffec, 0x00000000, 0x3c040800, 0x0e000637, 0x8c8423f4, 0x0a00032c,
4090         0x00000000, 0x3c030800, 0x90631b88, 0x24020002, 0x14620003, 0x3c034b65,
4091         0x0a0002e3, 0x00008021, 0x8e22001c, 0x34637654, 0x10430002, 0x24100002,
4092         0x24100001, 0x01002021, 0x0e000352, 0x02003021, 0x24020003, 0x3c010800,
4093         0xa0221b88, 0x24020002, 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323e0,
4094         0x10620006, 0x00000000, 0x3c020800, 0x944223c8, 0x00021400, 0x0a000321,
4095         0xae220014, 0x3c040800, 0x248423ca, 0x94820000, 0x00021400, 0xae220014,
4096         0x3c020800, 0x8c421bac, 0x3c03c000, 0x3c010800, 0xa0201b88, 0x00431025,
4097         0xaf625c5c, 0x8f625c50, 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2,
4098         0x8c820000, 0x00431025, 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa,
4099         0x00000000, 0x3c020800, 0x24421b74, 0x8c430000, 0x24630001, 0xac430000,
4100         0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14,
4101         0x3c020800, 0x8c421b30, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b30,
4102         0x2c620002, 0x1040fff7, 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c,
4103         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x12600003, 0x00000000,
4104         0x0e0004bf, 0x00000000, 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
4105         0x8fb00018, 0x03e00008, 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b78,
4106         0x8c820000, 0x00031c02, 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004,
4107         0x8f624450, 0x00021c02, 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444,
4108         0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008,
4109         0x3042ffff, 0x3c024000, 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002,
4110         0x1440fffc, 0x00000000, 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821,
4111         0x14c00011, 0x256e0008, 0x3c020800, 0x8c4223e4, 0x10400007, 0x24020016,
4112         0x3c010800, 0xa42223c2, 0x2402002a, 0x3c010800, 0x0a000366, 0xa42223c4,
4113         0x8d670010, 0x00071402, 0x3c010800, 0xa42223c2, 0x3c010800, 0xa42723c4,
4114         0x3c040800, 0x948423c4, 0x3c030800, 0x946323c2, 0x95cf0006, 0x3c020800,
4115         0x944223c0, 0x00832023, 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821,
4116         0x3082ffff, 0x14c0001a, 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800,
4117         0xa42223c6, 0x95820004, 0x95830006, 0x3c010800, 0xac2023d4, 0x3c010800,
4118         0xac2023d8, 0x00021400, 0x00431025, 0x3c010800, 0xac221bb0, 0x95220004,
4119         0x3c010800, 0xa4221bb4, 0x95230002, 0x01e51023, 0x0043102a, 0x10400010,
4120         0x24020001, 0x3c010800, 0x0a00039a, 0xac2223e8, 0x3c030800, 0x8c6323d8,
4121         0x3c020800, 0x94421bb4, 0x00431021, 0xa5220004, 0x3c020800, 0x94421bb0,
4122         0xa5820004, 0x3c020800, 0x8c421bb0, 0xa5820006, 0x3c020800, 0x8c4223e0,
4123         0x3c0d0800, 0x8dad23d4, 0x3c0a0800, 0x144000e5, 0x8d4a23d8, 0x3c020800,
4124         0x94421bb4, 0x004a1821, 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d,
4125         0x01435023, 0x3c020800, 0x944223c6, 0x30420009, 0x10400008, 0x00000000,
4126         0x9582000c, 0x3042fff6, 0xa582000c, 0x3c020800, 0x944223c6, 0x30420009,
4127         0x01a26823, 0x3c020800, 0x8c4223e8, 0x1040004a, 0x01203821, 0x3c020800,
4128         0x944223c2, 0x00004021, 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff,
4129         0x00021042, 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001,
4130         0x00c23021, 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff,
4131         0x00623021, 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a,
4132         0x00003021, 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021,
4133         0x2d020004, 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009,
4134         0x00442023, 0x01803821, 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042,
4135         0x18400010, 0x00c33021, 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021,
4136         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4137         0x00625824, 0x25670008, 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001,
4138         0x10400005, 0x00061c02, 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02,
4139         0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x0a00047f, 0x30c6ffff,
4140         0x24020002, 0x14c20081, 0x00000000, 0x3c020800, 0x8c4223fc, 0x14400007,
4141         0x00000000, 0x3c020800, 0x944223c2, 0x95230002, 0x01e21023, 0x10620077,
4142         0x00000000, 0x3c020800, 0x944223c2, 0x01e21023, 0xa5220002, 0x3c020800,
4143         0x8c4223fc, 0x1040001a, 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b86,
4144         0x00e04021, 0x00072c02, 0x00aa2021, 0x00431023, 0x00823823, 0x00072402,
4145         0x30e2ffff, 0x00823821, 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800,
4146         0x948423c4, 0x00453023, 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021,
4147         0x00061c02, 0x30c2ffff, 0x0a00047f, 0x00623021, 0x01203821, 0x00004021,
4148         0x3082ffff, 0x00021042, 0x18400008, 0x00003021, 0x00401821, 0x94e20000,
4149         0x25080001, 0x00c23021, 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02,
4150         0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x00c02821, 0x00061027,
4151         0xa522000a, 0x00003021, 0x2527000c, 0x00004021, 0x94e20000, 0x25080001,
4152         0x00c23021, 0x2d020004, 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021,
4153         0x91230009, 0x00442023, 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800,
4154         0x948423c4, 0x00621821, 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021,
4155         0x00061c02, 0x3c020800, 0x944223c0, 0x00c34821, 0x00441023, 0x00021fc2,
4156         0x00431021, 0x00021043, 0x18400010, 0x00003021, 0x00402021, 0x94e20000,
4157         0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000,
4158         0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3,
4159         0x00000000, 0x3c020800, 0x944223dc, 0x00c23021, 0x3122ffff, 0x00c23021,
4160         0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x00c04021,
4161         0x00061027, 0xa5820010, 0xadc00014, 0x0a00049f, 0xadc00000, 0x8dc70010,
4162         0x00e04021, 0x11400007, 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff,
4163         0x00433021, 0x00061402, 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800,
4164         0x946323c4, 0x3102ffff, 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02,
4165         0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027,
4166         0xa5820010, 0x3102ffff, 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800,
4167         0x8c4223e4, 0x10400002, 0x25e2fff2, 0xa5c20034, 0x3c020800, 0x8c4223d8,
4168         0x3c040800, 0x8c8423d4, 0x24420001, 0x3c010800, 0xac2223d8, 0x3c020800,
4169         0x8c421bb0, 0x3303ffff, 0x00832021, 0x3c010800, 0xac2423d4, 0x00431821,
4170         0x0062102b, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223d4, 0x3c010800,
4171         0xac231bb0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800, 0x24a51b86,
4172         0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, 0xafb40030,
4173         0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000, 0x3c020800,
4174         0x944223c0, 0x3c030800, 0x8c631ba0, 0x3c040800, 0x8c841b9c, 0x01221023,
4175         0x0064182a, 0xa7a9001e, 0x106000bc, 0xa7a20016, 0x24be0022, 0x97b6001e,
4176         0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, 0x8fc2fff8,
4177         0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000ae, 0x00000000,
4178         0x97d50818, 0x32a2ffff, 0x104000a1, 0x00009021, 0x0040a021, 0x00008821,
4179         0x0e000621, 0x00000000, 0x00403021, 0x14c00007, 0x00000000, 0x3c020800,
4180         0x8c4223cc, 0x24420001, 0x3c010800, 0x0a000593, 0xac2223cc, 0x3c100800,
4181         0x02118021, 0x8e101bb8, 0x9608000a, 0x31020040, 0x10400004, 0x2407180c,
4182         0x8e02000c, 0x2407188c, 0xacc20018, 0x31020080, 0x54400001, 0x34e70010,
4183         0x3c020800, 0x00511021, 0x8c421bc0, 0x3c030800, 0x00711821, 0x8c631bc4,
4184         0x00021500, 0x00031c00, 0x00431025, 0xacc20014, 0x96040008, 0x3242ffff,
4185         0x00821021, 0x0282102a, 0x14400002, 0x02b22823, 0x00802821, 0x8e020000,
4186         0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, 0x26310010, 0xac820004,
4187         0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010, 0x24020305, 0x0e00059f,
4188         0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc6, 0x3242ffff, 0x0a00058b,
4189         0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, 0x10400066, 0x00000000,
4190         0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, 0x0e000621, 0x8e101bb8,
4191         0x00403021, 0x14c00005, 0x00000000, 0x8e62082c, 0x24420001, 0x0a000593,
4192         0xae62082c, 0x9608000a, 0x31020040, 0x10400004, 0x2407180c, 0x8e02000c,
4193         0x2407188c, 0xacc20018, 0x3c020800, 0x00511021, 0x8c421bc0, 0x3c030800,
4194         0x00711821, 0x8c631bc4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4195         0x8e63fff4, 0x96020008, 0x00432023, 0x3242ffff, 0x3083ffff, 0x00431021,
4196         0x02c2102a, 0x10400003, 0x00802821, 0x97a9001e, 0x01322823, 0x8e620000,
4197         0x30a4ffff, 0x00441021, 0xae620000, 0xa4c5000e, 0x8e020000, 0xacc20000,
4198         0x8e020004, 0x8e63fff4, 0x00431021, 0xacc20004, 0x8e63fff4, 0x96020008,
4199         0x00641821, 0x0062102a, 0x14400006, 0x02459021, 0x8e62fff0, 0xae60fff4,
4200         0x24420001, 0x0a00056e, 0xae62fff0, 0xae63fff4, 0xacc00008, 0x3242ffff,
4201         0x10560003, 0x31020004, 0x10400006, 0x24020305, 0x31020080, 0x54400001,
4202         0x34e70010, 0x34e70020, 0x24020905, 0xa4c2000c, 0x8ee30000, 0x8ee20004,
4203         0x14620007, 0x3c02b49a, 0x8ee20860, 0x54400001, 0x34e70400, 0x3c024b65,
4204         0x0a000585, 0x34427654, 0x344289ab, 0xacc2001c, 0x30e2ffff, 0xacc20010,
4205         0x0e00059f, 0x00c02021, 0x3242ffff, 0x0056102b, 0x1440ff9c, 0x00000000,
4206         0x8e620000, 0x8e63fffc, 0x0043102a, 0x1440ff4a, 0x00000000, 0x8fbf0044,
4207         0x8fbe0040, 0x8fb7003c, 0x8fb60038, 0x8fb50034, 0x8fb40030, 0x8fb3002c,
4208         0x8fb20028, 0x8fb10024, 0x8fb00020, 0x03e00008, 0x27bd0048, 0x27bdffe8,
4209         0xafbf0014, 0xafb00010, 0x8f624450, 0x8f634410, 0x0a0005ae, 0x00808021,
4210         0x8f626820, 0x30422000, 0x10400003, 0x00000000, 0x0e0001ef, 0x00002021,
4211         0x8f624450, 0x8f634410, 0x3042ffff, 0x0043102b, 0x1440fff5, 0x00000000,
4212         0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000b, 0x00000000, 0x8f630c14,
4213         0x3c020800, 0x8c421b30, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b30,
4214         0x2c620002, 0x1040fff7, 0x00000000, 0xaf705c18, 0x8f625c10, 0x30420002,
4215         0x10400009, 0x00000000, 0x8f626820, 0x30422000, 0x1040fff8, 0x00000000,
4216         0x0e0001ef, 0x00002021, 0x0a0005c1, 0x00000000, 0x8fbf0014, 0x8fb00010,
4217         0x03e00008, 0x27bd0018, 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000,
4218         0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804,
4219         0x8f634000, 0x24020b50, 0x3c010800, 0xac221b44, 0x24020b78, 0x3c010800,
4220         0xac221b54, 0x34630002, 0xaf634000, 0x0e000601, 0x00808021, 0x3c010800,
4221         0xa0221b58, 0x304200ff, 0x24030002, 0x14430005, 0x00000000, 0x3c020800,
4222         0x8c421b44, 0x0a0005f4, 0xac5000c0, 0x3c020800, 0x8c421b44, 0xac5000bc,
4223         0x8f624434, 0x8f634438, 0x8f644410, 0x3c010800, 0xac221b4c, 0x3c010800,
4224         0xac231b5c, 0x3c010800, 0xac241b48, 0x8fbf0014, 0x8fb00010, 0x03e00008,
4225         0x27bd0018, 0x3c040800, 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003,
4226         0xac830000, 0x8cc20000, 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa,
4227         0xac830000, 0x8cc20000, 0x50430001, 0x24050001, 0x3c020800, 0xac470000,
4228         0x03e00008, 0x00a01021, 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c,
4229         0x8f62680c, 0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9,
4230         0x00000000, 0x03e00008, 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b4c,
4231         0x00031c02, 0x0043102b, 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b5c,
4232         0x8f624450, 0x00021c02, 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444,
4233         0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008,
4234         0x3042ffff, 0x3082ffff, 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000,
4235         0x0a000644, 0x2402ffff, 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002,
4236         0x1440fffc, 0x00001021, 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800,
4237         0x8c631b48, 0x0a00064d, 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b,
4238         0x1440fffc, 0x00000000, 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821,
4239         0x3c040800, 0x24841ae0, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4240         0x0e000678, 0xafa00014, 0x0a00065c, 0x00000000, 0x8fbf0018, 0x03e00008,
4241         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4242         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b64,
4243         0x24020040, 0x3c010800, 0xac221b68, 0x3c010800, 0xac201b60, 0xac600000,
4244         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4245         0x00804821, 0x8faa0010, 0x3c020800, 0x8c421b60, 0x3c040800, 0x8c841b68,
4246         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac231b60, 0x14400003,
4247         0x00004021, 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x3c030800,
4248         0x8c631b64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4249         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b60,
4250         0x3c030800, 0x8c631b64, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4251         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4252         0x00000000, 0x00000000,
4253 };
4254
4255 u32 tg3TsoFwRodata[] = {
4256         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4257         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4258         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4259         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4260 };
4261
4262 #if 0 /* All zeros, don't eat up space with it. */
4263 u32 tg3TsoFwData[] = {
4264         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4265         0x00000000, 0x00000000, 0x00000000
4266 };
4267 #endif
4268
4269 /* 5705 needs a special version of the TSO firmware.  */
4270 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4271 #define TG3_TSO5_FW_RELEASE_MINOR       0x1
4272 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4273 #define TG3_TSO5_FW_START_ADDR          0x00010000
4274 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4275 #define TG3_TSO5_FW_TEXT_LEN            0xeb0
4276 #define TG3_TSO5_FW_RODATA_ADDR         0x00010eb0
4277 #define TG3_TSO5_FW_RODATA_LEN          0x50
4278 #define TG3_TSO5_FW_DATA_ADDR           0x00010f20
4279 #define TG3_TSO5_FW_DATA_LEN            0x20
4280 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f40
4281 #define TG3_TSO5_FW_SBSS_LEN            0x28
4282 #define TG3_TSO5_FW_BSS_ADDR            0x00010f70
4283 #define TG3_TSO5_FW_BSS_LEN             0x88
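/* Note: the TSO5 image segments above are laid out back to back starting at
 * 0x00010000 (with small alignment gaps).  tg3_load_tso_firmware() below sums
 * these lengths to size the scratch area it borrows from the 5705 mbuf pool,
 * and tg3_reset_hw() shrinks that pool by the same 128-byte-rounded amount.
 */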
4284
4285 static u32 tg3Tso5FwText[] = {
4286         0x0c004003, 0x00000000, 0x00010f30, 0x00000000, 0x10000003, 0x00000000,
4287         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4288         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4289         0xafbf0018, 0x0c0042f0, 0x34840002, 0x0c00436c, 0x00000000, 0x3c030001,
4290         0x90630f54, 0x24020002, 0x3c040001, 0x24840ebc, 0x14620003, 0x24050001,
4291         0x3c040001, 0x24840eb0, 0x24060001, 0x00003821, 0xafa00010, 0x0c004380,
4292         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4293         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4294         0x0c0042d3, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4295         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4296         0x0c004064, 0x00000000, 0x3c020001, 0x90420f76, 0x10510003, 0x32020200,
4297         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4298         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4299         0x27bdffe0, 0x3c040001, 0x24840ed0, 0x00002821, 0x00003021, 0x00003821,
4300         0xafbf0018, 0xafa00010, 0x0c004380, 0xafa00014, 0x0000d021, 0x24020130,
4301         0xaf625000, 0x3c010001, 0xa4200f70, 0x3c010001, 0xa0200f77, 0x8fbf0018,
4302         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f80,
4303         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4304         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4305         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4306         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f9a, 0x00041402,
4307         0xa0a20000, 0x3c010001, 0xa0240f9b, 0x3c020001, 0x00431021, 0x94428014,
4308         0x3c010001, 0xa0220f9c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4309         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f80, 0x0124102b,
4310         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4311         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4312         0x24c60008, 0x00003821, 0x3c080001, 0x25080f9b, 0x91060000, 0x3c020001,
4313         0x90420f9c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4314         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4315         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4316         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4317         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4318         0x080040fa, 0xac220fa0, 0x3c050001, 0x24a50f9c, 0x90a20000, 0x3c0c0001,
4319         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4320         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4321         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f9c,
4322         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4323         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4324         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4325         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4326         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4327         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4328         0x90420f9c, 0x3c030001, 0x90630f9a, 0x00e2c823, 0x3c020001, 0x90420f9b,
4329         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4330         0x3c010001, 0xa4220f98, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f96,
4331         0x3c010001, 0xa4200f92, 0x00021400, 0x00431025, 0x3c010001, 0xac220f8c,
4332         0x95020004, 0x3c010001, 0x08004124, 0xa4220f90, 0x3c020001, 0x94420f90,
4333         0x3c030001, 0x94630f92, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f8c,
4334         0xa4c20004, 0x3c020001, 0x8c420f8c, 0xa4c20006, 0x3c040001, 0x94840f92,
4335         0x3c020001, 0x94420f90, 0x3c0a0001, 0x954a0f96, 0x00441821, 0x3063ffff,
4336         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f98,
4337         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f98,
4338         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4339         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4340         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0fa0, 0x10800005,
4341         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4342         0xa502000a, 0x3c030001, 0x90630f9b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4343         0x00432023, 0x3c020001, 0x94420fa0, 0x00442021, 0x00041c02, 0x3082ffff,
4344         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4345         0x24a50f9a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4346         0x00e21023, 0xa5020002, 0x3c030001, 0x94630fa0, 0x3c020001, 0x94420f7a,
4347         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4348         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f9c, 0x24620001,
4349         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4350         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4351         0x94420fa2, 0x3183ffff, 0x3c040001, 0x90840f9b, 0x00431021, 0x00e21021,
4352         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4353         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4354         0x00431025, 0x3c040001, 0x24840f92, 0xade20010, 0x94820000, 0x3c050001,
4355         0x94a50f96, 0x3c030001, 0x8c630f8c, 0x24420001, 0x00b92821, 0xa4820000,
4356         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f96, 0x10600003,
4357         0x24a2ffff, 0x3c010001, 0xa4220f96, 0x3c024000, 0x03021025, 0x3c010001,
4358         0xac240f8c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f76,
4359         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4360         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f84,
4361         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4362         0x24020008, 0x3c010001, 0xa4220f88, 0x30620004, 0x10400005, 0x24020001,
4363         0x3c010001, 0xa0220f77, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f77,
4364         0x00031402, 0x3c010001, 0xa4220f74, 0x9483000c, 0x24020001, 0x3c010001,
4365         0xa4200f70, 0x3c010001, 0xa0220f76, 0x3c010001, 0xa4230f82, 0x24020001,
4366         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4367         0x080042cf, 0x00000000, 0x3c020001, 0x94420f82, 0x241a0001, 0x3c010001,
4368         0xa4200f7e, 0x3c010001, 0xa4200f72, 0x304407ff, 0x00021bc2, 0x00031823,
4369         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4370         0xa4240f78, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f7a, 0x3c010001,
4371         0xa4230f7c, 0x3c060001, 0x24c60f72, 0x94c50000, 0x94c30002, 0x3c040001,
4372         0x94840f7a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4373         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f76, 0x8f641008,
4374         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4375         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4376         0x94630f70, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4377         0xa4230f70, 0xaf620ce8, 0x3c020001, 0x94420f88, 0x34420024, 0xaf620cec,
4378         0x94c30002, 0x3c020001, 0x94420f70, 0x14620012, 0x3c028000, 0x3c108000,
4379         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f76, 0x8f641008, 0x00901024,
4380         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4381         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4382         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4383         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4384         0x3c070001, 0x24e70f70, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4385         0x8c420f84, 0xaf620ce4, 0x3c050001, 0x94a50f74, 0x94e30000, 0x3c040001,
4386         0x94840f78, 0x3c020001, 0x94420f7e, 0x00a32823, 0x00822023, 0x30a6ffff,
4387         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f7c,
4388         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f74,
4389         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4390         0x90420f77, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f88, 0x34630624,
4391         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f88, 0x3c030008, 0x34630624,
4392         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4393         0xa0200f76, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4394         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4395         0x00000000, 0x3c030001, 0x94630f88, 0x34420624, 0x3c108000, 0x00621825,
4396         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4397         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4398         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f7e, 0x3c020001, 0x94420f7c,
4399         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f77, 0x10400009,
4400         0x3c03000c, 0x3c020001, 0x94420f88, 0x34630624, 0x0000d021, 0x00431025,
4401         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f88, 0x3c030008,
4402         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f7e, 0x00451021,
4403         0x3c010001, 0xa4220f7e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4404         0xa0200f76, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4405         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4406         0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdffe0, 0x3c040001, 0x24840ee0,
4407         0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004380,
4408         0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, 0xa4200f70,
4409         0x3c010001, 0xa0200f77, 0x8f636804, 0x3c020001, 0x3442e000, 0x00621824,
4410         0x3c020001, 0x14620003, 0x00000000, 0x080042eb, 0x00000000, 0x8fbf0018,
4411         0x03e00008, 0x27bd0020, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4412         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4413         0x3c010001, 0xac220f40, 0x24020b78, 0x3c010001, 0xac220f50, 0x34630002,
4414         0xaf634000, 0x0c00431d, 0x00808021, 0x3c010001, 0xa0220f54, 0x304200ff,
4415         0x24030002, 0x14430005, 0x00000000, 0x3c020001, 0x8c420f40, 0x08004310,
4416         0xac5000c0, 0x3c020001, 0x8c420f40, 0xac5000bc, 0x8f624434, 0x8f634438,
4417         0x8f644410, 0x3c010001, 0xac220f48, 0x3c010001, 0xac230f58, 0x3c010001,
4418         0xac240f44, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008,
4419         0x24020001, 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c,
4420         0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000,
4421         0x03e00008, 0x27bd0008, 0x8f634450, 0x3c020001, 0x8c420f48, 0x00031c02,
4422         0x0043102b, 0x14400008, 0x3c038000, 0x3c040001, 0x8c840f58, 0x8f624450,
4423         0x00021c02, 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444,
4424         0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff,
4425         0x3082ffff, 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0800434f,
4426         0x2402ffff, 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc,
4427         0x00001021, 0x03e00008, 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f44,
4428         0x08004358, 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc,
4429         0x00000000, 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001,
4430         0x24840ef0, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004380,
4431         0xafa00014, 0x08004367, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4432         0x3c020001, 0x3442d600, 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff,
4433         0x3c010001, 0xac220f60, 0x24020040, 0x3c010001, 0xac220f64, 0x3c010001,
4434         0xac200f5c, 0xac600000, 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000,
4435         0x03e00008, 0x00000000, 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f5c,
4436         0x3c040001, 0x8c840f64, 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001,
4437         0xac230f5c, 0x14400003, 0x00004021, 0x3c010001, 0xac200f5c, 0x3c020001,
4438         0x8c420f5c, 0x3c030001, 0x8c630f60, 0x91240000, 0x00021140, 0x00431021,
4439         0x00481021, 0x25080001, 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001,
4440         0x3c020001, 0x8c420f5c, 0x3c030001, 0x8c630f60, 0x8f64680c, 0x00021140,
4441         0x00431021, 0xac440008, 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018,
4442         0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4443 };
4444
4445 u32 tg3Tso5FwRodata[] = {
4446         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4447         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4448         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4449         0x00000000, 0x00000000, 0x00000000
4450 };
4451
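/* The initialized data segment appears to hold just the firmware version
 * string, "stkoffld_v1.1.0", stored as big-endian 32-bit words.
 */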
4452 u32 tg3Tso5FwData[] = {
4453         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64,
4454         0x5f76312e, 0x312e3000, 0x00000000
4455 };
4456
4457 /* tp->lock is held. */
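/* Pick the firmware image and target CPU: the 5705 runs the TSO5 image on its
 * RX CPU, with scratch space carved out of the mbuf pool, while other chips
 * load the regular TSO image into the TX CPU scratch area.
 */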
4458 static int tg3_load_tso_firmware(struct tg3 *tp)
4459 {
4460         struct fw_info info;
4461         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4462         int err, i;
4463
4464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4465                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4466                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4467                 info.text_data = &tg3Tso5FwText[0];
4468                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4469                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4470                 info.rodata_data = &tg3Tso5FwRodata[0];
4471                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4472                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4473                 info.data_data = &tg3Tso5FwData[0];
4474                 cpu_base = RX_CPU_BASE;
4475                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4476                 cpu_scratch_size = (info.text_len +
4477                                     info.rodata_len +
4478                                     info.data_len +
4479                                     TG3_TSO5_FW_SBSS_LEN +
4480                                     TG3_TSO5_FW_BSS_LEN);
4481         } else {
4482                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4483                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4484                 info.text_data = &tg3TsoFwText[0];
4485                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4486                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4487                 info.rodata_data = &tg3TsoFwRodata[0];
4488                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4489                 info.data_len = TG3_TSO_FW_DATA_LEN;
4490                 info.data_data = NULL;
4491                 cpu_base = TX_CPU_BASE;
4492                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4493                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4494         }
4495
4496         err = tg3_load_firmware_cpu(tp, cpu_base,
4497                                     cpu_scratch_base, cpu_scratch_size,
4498                                     &info);
4499         if (err)
4500                 return err;
4501
4502         /* Now start up the CPU. */
4503         tw32(cpu_base + CPU_STATE, 0xffffffff);
4504         tw32_f(cpu_base + CPU_PC,    info.text_base);
4505
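        /* The PC write may not take effect immediately; retry a few times,
         * halting the CPU and re-writing the PC between attempts, before
         * giving up.
         */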
4506         for (i = 0; i < 5; i++) {
4507                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4508                         break;
4509                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4510                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4511                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4512                 udelay(1000);
4513         }
4514         if (i >= 5) {
4515                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
4516                        "CPU PC for %s: is %08x, should be %08x\n",
4517                        tp->dev->name, tr32(cpu_base + CPU_PC),
4518                        info.text_base);
4519                 return -ENODEV;
4520         }
4521         tw32(cpu_base + CPU_STATE, 0xffffffff);
4522         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4523         return 0;
4524 }
4525
4526 #endif /* TG3_TSO_SUPPORT != 0 */
4527
4528 /* tp->lock is held. */
4529 static void __tg3_set_mac_addr(struct tg3 *tp)
4530 {
4531         u32 addr_high, addr_low;
4532         int i;
4533
4534         addr_high = ((tp->dev->dev_addr[0] << 8) |
4535                      tp->dev->dev_addr[1]);
4536         addr_low = ((tp->dev->dev_addr[2] << 24) |
4537                     (tp->dev->dev_addr[3] << 16) |
4538                     (tp->dev->dev_addr[4] <<  8) |
4539                     (tp->dev->dev_addr[5] <<  0));
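        /* The MAC provides four station address slots at 8-byte strides;
         * program the same address into all of them.
         */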
4540         for (i = 0; i < 4; i++) {
4541                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4542                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4543         }
4544
4545         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4546             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4547             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4548                 for (i = 0; i < 12; i++) {
4549                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4550                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4551                 }
4552         }
4553
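        /* Seed the transmit backoff generator from the station address,
         * presumably so that NICs sharing a wire pick different backoff
         * slots.
         */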
4554         addr_high = (tp->dev->dev_addr[0] +
4555                      tp->dev->dev_addr[1] +
4556                      tp->dev->dev_addr[2] +
4557                      tp->dev->dev_addr[3] +
4558                      tp->dev->dev_addr[4] +
4559                      tp->dev->dev_addr[5]) &
4560                 TX_BACKOFF_SEED_MASK;
4561         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4562 }
4563
4564 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4565 {
4566         struct tg3 *tp = dev->priv;
4567         struct sockaddr *addr = p;
4568
4569         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4570
4571         spin_lock_irq(&tp->lock);
4572         __tg3_set_mac_addr(tp);
4573         spin_unlock_irq(&tp->lock);
4574
4575         return 0;
4576 }
4577
4578 /* tp->lock is held. */
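/* Program one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA address of
 * the ring, its (max length << 16) | flags word, and, except on 5705, the
 * NIC-local address of the descriptors.
 */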
4579 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4580                            dma_addr_t mapping, u32 maxlen_flags,
4581                            u32 nic_addr)
4582 {
4583         tg3_write_mem(tp,
4584                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4585                       ((u64) mapping >> 32));
4586         tg3_write_mem(tp,
4587                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4588                       ((u64) mapping & 0xffffffff));
4589         tg3_write_mem(tp,
4590                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4591                        maxlen_flags);
4592
4593         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4594                 tg3_write_mem(tp,
4595                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4596                               nic_addr);
4597 }
4598
4599 static void __tg3_set_rx_mode(struct net_device *);
4600
4601 /* tp->lock is held. */
4602 static int tg3_reset_hw(struct tg3 *tp)
4603 {
4604         u32 val, rdmac_mode;
4605         int i, err, limit;
4606
4607         tg3_disable_ints(tp);
4608
4609         tg3_stop_fw(tp);
4610
4611         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4612                 err = tg3_abort_hw(tp);
4613                 if (err)
4614                         return err;
4615         }
4616
4617         err = tg3_chip_reset(tp);
4618         if (err)
4619                 return err;
4620
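        /* Report the driver state to any on-chip management firmware via
         * the NIC SRAM mailbox.
         */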
4621         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
4622                 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4623                               DRV_STATE_START);
4624         else
4625                 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4626                               DRV_STATE_SUSPEND);
4627
4628         /* This works around an issue with Athlon chipsets on
4629          * B3 tigon3 silicon.  This bit has no effect on any
4630          * other revision.
4631          */
4632         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4633         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4634
4635         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4636             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4637                 val = tr32(TG3PCI_PCISTATE);
4638                 val |= PCISTATE_RETRY_SAME_DMA;
4639                 tw32(TG3PCI_PCISTATE, val);
4640         }
4641
4642         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
4643                 /* Enable some hw fixes.  */
4644                 val = tr32(TG3PCI_MSI_DATA);
4645                 val |= (1 << 26) | (1 << 28) | (1 << 29);
4646                 tw32(TG3PCI_MSI_DATA, val);
4647         }
4648
4649         /* Descriptor ring init may make accesses to the
4650          * NIC SRAM area to set up the TX descriptors, so we
4651          * can only do this after the hardware has been
4652          * successfully reset.
4653          */
4654         tg3_init_rings(tp);
4655
4656         /* This value is determined during the probe time DMA
4657          * engine test, tg3_test_dma.
4658          */
4659         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4660
4661         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4662                           GRC_MODE_4X_NIC_SEND_RINGS |
4663                           GRC_MODE_NO_TX_PHDR_CSUM |
4664                           GRC_MODE_NO_RX_PHDR_CSUM);
4665         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
4666                 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4667         else
4668                 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
4669         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4670                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4671         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4672                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4673
4674         tw32(GRC_MODE,
4675              tp->grc_mode |
4676              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4677
4678         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
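        /* A prescaler value of N appears to divide the core clock by N + 1,
         * so the 65 below is assumed to yield a 1 MHz tick from the 66 MHz
         * clock for the coalescing timers.
         */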
4679         val = tr32(GRC_MISC_CFG);
4680         val &= ~0xff;
4681         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
4682         tw32(GRC_MISC_CFG, val);
4683
4684         /* Initialize MBUF/DESC pool. */
4685         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4686                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4687                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4688                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4689                 else
4690                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4691                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4692                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4693         }
4694 #if TG3_TSO_SUPPORT != 0
4695         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4696                 int fw_len;
4697
4698                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4699                           TG3_TSO5_FW_RODATA_LEN +
4700                           TG3_TSO5_FW_DATA_LEN +
4701                           TG3_TSO5_FW_SBSS_LEN +
4702                           TG3_TSO5_FW_BSS_LEN);
4703                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
4704                 tw32(BUFMGR_MB_POOL_ADDR,
4705                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4706                 tw32(BUFMGR_MB_POOL_SIZE,
4707                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4708         }
4709 #endif
4710
4711         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4712                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4713                      tp->bufmgr_config.mbuf_read_dma_low_water);
4714                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4715                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4716                 tw32(BUFMGR_MB_HIGH_WATER,
4717                      tp->bufmgr_config.mbuf_high_water);
4718         } else {
4719                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4720                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4721                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4722                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4723                 tw32(BUFMGR_MB_HIGH_WATER,
4724                      tp->bufmgr_config.mbuf_high_water_jumbo);
4725         }
4726         tw32(BUFMGR_DMA_LOW_WATER,
4727              tp->bufmgr_config.dma_low_water);
4728         tw32(BUFMGR_DMA_HIGH_WATER,
4729              tp->bufmgr_config.dma_high_water);
4730
4731         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4732         for (i = 0; i < 2000; i++) {
4733                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4734                         break;
4735                 udelay(10);
4736         }
4737         if (i >= 2000) {
4738                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4739                        tp->dev->name);
4740                 return -ENODEV;
4741         }
4742
4743         /* Clear statistics/status block in chip, and status block in host RAM. */
4744         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4745                 for (i = NIC_SRAM_STATS_BLK;
4746                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
4747                      i += sizeof(u32)) {
4748                         tg3_write_mem(tp, i, 0);
4749                         udelay(40);
4750                 }
4751         }
4752         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4753
4754         /* Setup replenish threshold. */
4755         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4756
4757         /* Initialize TG3_BDINFO's at:
4758          *  RCVDBDI_STD_BD:     standard eth size rx ring
4759          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
4760          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
4761          *
4762          * like so:
4763          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
4764          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
4765          *                              ring attribute flags
4766          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
4767          *
4768          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
4769          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
4770          *
4771          * The size of each ring is fixed in the firmware, but the location is
4772          * configurable.
4773          */
4774         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4775              ((u64) tp->rx_std_mapping >> 32));
4776         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4777              ((u64) tp->rx_std_mapping & 0xffffffff));
4778         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
4779              NIC_SRAM_RX_BUFFER_DESC);
4780
4781         /* Don't even try to program the JUMBO/MINI buffer descriptor
4782          * configs on 5705.
4783          */
4784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4785                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4786                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
4787         } else {
4788                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4789                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4790
4791                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
4792                      BDINFO_FLAGS_DISABLED);
4793
4794                 /* Setup replenish threshold. */
4795                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
4796
4797                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
4798                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4799                              ((u64) tp->rx_jumbo_mapping >> 32));
4800                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4801                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
4802                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4803                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4804                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
4805                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
4806                 } else {
4807                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4808                              BDINFO_FLAGS_DISABLED);
4809                 }
4810
4811         }
4812
4813         /* There is only one send ring on 5705, so there is no need to
4814          * explicitly disable the others.
4815          */
4816         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4817                 /* Clear out send RCB ring in SRAM. */
4818                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
4819                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4820                                       BDINFO_FLAGS_DISABLED);
4821         }
4822
4823         tp->tx_prod = 0;
4824         tp->tx_cons = 0;
4825         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4826         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4827
4828         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
4829                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4830                                tp->tx_desc_mapping,
4831                                (TG3_TX_RING_SIZE <<
4832                                 BDINFO_FLAGS_MAXLEN_SHIFT),
4833                                NIC_SRAM_TX_BUFFER_DESC);
4834         } else {
4835                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4836                                0,
4837                                BDINFO_FLAGS_DISABLED,
4838                                NIC_SRAM_TX_BUFFER_DESC);
4839         }
4840
4841         /* There is only one receive return ring on 5705, so there is no
4842          * need to explicitly disable the others.
4843          */
4844         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4845                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
4846                      i += TG3_BDINFO_SIZE) {
4847                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4848                                       BDINFO_FLAGS_DISABLED);
4849                 }
4850         }
4851
4852         tp->rx_rcb_ptr = 0;
4853         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
4854
4855         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
4856                        tp->rx_rcb_mapping,
4857                        (TG3_RX_RCB_RING_SIZE(tp) <<
4858                         BDINFO_FLAGS_MAXLEN_SHIFT),
4859                        0);
4860
4861         tp->rx_std_ptr = tp->rx_pending;
4862         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4863                      tp->rx_std_ptr);
4864
4865         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
4866                                                 tp->rx_jumbo_pending : 0;
4867         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4868                      tp->rx_jumbo_ptr);
4869
4870         /* Initialize MAC address and backoff seed. */
4871         __tg3_set_mac_addr(tp);
4872
4873         /* MTU + ethernet header + FCS + optional VLAN tag */
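        /* (ETH_HLEN covers the 14-byte header; the extra 8 bytes account for
         *  the 4-byte FCS plus a 4-byte 802.1Q tag.)
         */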
4874         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
4875
4876         /* The slot time is changed by tg3_setup_phy if we
4877          * run at gigabit with half duplex.
4878          */
4879         tw32(MAC_TX_LENGTHS,
4880              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4881              (6 << TX_LENGTHS_IPG_SHIFT) |
4882              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4883
4884         /* Receive rules. */
4885         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
4886         tw32(RCVLPC_CONFIG, 0x0181);
4887
4888         /* Calculate the RDMAC_MODE setting early; it is needed to determine
4889          * the RCVLPC_STATS_ENABLE mask.
4890          */
4891         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
4892                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
4893                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
4894                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
4895                       RDMAC_MODE_LNGREAD_ENAB);
4896         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
4897                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
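        /* 5705 revisions other than A0 get read-DMA FIFO tuning: TSO-capable
         * chips use the 128-entry FIFO split, and non-5788 chips on a slower
         * bus use long-burst reads instead.
         */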
4898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4899                 if (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
4900                         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4901                                 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
4902                         } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
4903                                    !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
4904                                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
4905                         }
4906                 }
4907         }
4908
4909         /* Receive/send statistics. */
4910         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
4911             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
4912                 val = tr32(RCVLPC_STATS_ENABLE);
4913                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
4914                 tw32(RCVLPC_STATS_ENABLE, val);
4915         } else {
4916                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
4917         }
4918         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
4919         tw32(SNDDATAI_STATSENAB, 0xffffff);
4920         tw32(SNDDATAI_STATSCTRL,
4921              (SNDDATAI_SCTRL_ENABLE |
4922               SNDDATAI_SCTRL_FASTUPD));
4923
4924         /* Setup host coalescing engine. */
4925         tw32(HOSTCC_MODE, 0);
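        /* Wait for the engine to report itself disabled before the
         * coalescing parameters are reprogrammed below.
         */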
4926         for (i = 0; i < 2000; i++) {
4927                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
4928                         break;
4929                 udelay(10);
4930         }
4931
4932         tw32(HOSTCC_RXCOL_TICKS, 0);
4933         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
4934         tw32(HOSTCC_RXMAX_FRAMES, 1);
4935         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
4936         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4937                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
4938         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4939                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
4940         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
4941         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
4942
4943         /* set status block DMA address */
4944         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
4945              ((u64) tp->status_mapping >> 32));
4946         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
4947              ((u64) tp->status_mapping & 0xffffffff));
4948
4949         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4950                 /* Status/statistics block address.  See tg3_timer,
4951                  * the tg3_periodic_fetch_stats call there, and
4952                  * tg3_get_stats to see how this works for 5705 chips.
4953                  */
4954                 tw32(HOSTCC_STAT_COAL_TICKS,
4955                      DEFAULT_STAT_COAL_TICKS);
4956                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
4957                      ((u64) tp->stats_mapping >> 32));
4958                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
4959                      ((u64) tp->stats_mapping & 0xffffffff));
4960                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
4961                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
4962         }
4963
4964         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
4965
4966         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
4967         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
4968         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4969                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
4970
4971         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
4972                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
4973         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
4974         udelay(40);
4975
4976         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
4977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
4978                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
4979                                        GRC_LCLCTRL_GPIO_OUTPUT1);
4980         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
4981         udelay(100);
4982
4983         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
4984         tr32(MAILBOX_INTERRUPT_0);
4985
4986         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4987                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
4988                 udelay(40);
4989         }
4990
4991         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
4992                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
4993                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
4994                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
4995                WDMAC_MODE_LNGREAD_ENAB);
4996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
4997             (tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) != 0 &&
4998             !(tp->tg3_flags2 & TG3_FLG2_IS_5788))
4999                 val |= WDMAC_MODE_RX_ACCEL;
5000         tw32_f(WDMAC_MODE, val);
5001         udelay(40);
5002
5003         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5004                 val = tr32(TG3PCI_X_CAPS);
5005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5006                         val &= ~PCIX_CAPS_BURST_MASK;
5007                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5008                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5009                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5010                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5011                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5012                                 val |= (tp->split_mode_max_reqs <<
5013                                         PCIX_CAPS_SPLIT_SHIFT);
5014                 }
5015                 tw32(TG3PCI_X_CAPS, val);
5016         }
5017
5018         tw32_f(RDMAC_MODE, rdmac_mode);
5019         udelay(40);
5020
5021         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5022         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
5023                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5024         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5025         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5026         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5027         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5028         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5029         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5030         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5031
5032         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5033                 err = tg3_load_5701_a0_firmware_fix(tp);
5034                 if (err)
5035                         return err;
5036         }
5037
5038 #if TG3_TSO_SUPPORT != 0
5039         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5040                 err = tg3_load_tso_firmware(tp);
5041                 if (err)
5042                         return err;
5043         }
5044 #endif
5045
5046         tp->tx_mode = TX_MODE_ENABLE;
5047         tw32_f(MAC_TX_MODE, tp->tx_mode);
5048         udelay(100);
5049
5050         tp->rx_mode = RX_MODE_ENABLE;
5051         tw32_f(MAC_RX_MODE, tp->rx_mode);
5052         udelay(10);
5053
5054         if (tp->link_config.phy_is_low_power) {
5055                 tp->link_config.phy_is_low_power = 0;
5056                 tp->link_config.speed = tp->link_config.orig_speed;
5057                 tp->link_config.duplex = tp->link_config.orig_duplex;
5058                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5059         }
5060
5061         tp->mi_mode = MAC_MI_MODE_BASE;
5062         tw32_f(MAC_MI_MODE, tp->mi_mode);
5063         udelay(40);
5064
5065         tw32(MAC_LED_CTRL, 0);
5066         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5067         if (tp->phy_id == PHY_ID_SERDES) {
5068                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5069                 udelay(10);
5070         }
5071         tw32_f(MAC_RX_MODE, tp->rx_mode);
5072         udelay(10);
5073
5074         if (tp->phy_id == PHY_ID_SERDES) {
5075                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5076                         /* Set drive transmission level to 1.2V  */
5077                         val = tr32(MAC_SERDES_CFG);
5078                         val &= 0xfffff000;
5079                         val |= 0x880;
5080                         tw32(MAC_SERDES_CFG, val);
5081                 }
5082                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5083                         tw32(MAC_SERDES_CFG, 0x616000);
5084         }
5085
5086         /* Prevent chip from dropping frames when flow control
5087          * is enabled.
5088          */
5089         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5090
5091         err = tg3_setup_phy(tp, 1);
5092         if (err)
5093                 return err;
5094
5095         if (tp->phy_id != PHY_ID_SERDES) {
5096                 u32 tmp;
5097
5098                 /* Clear CRC stats. */
5099                 tg3_readphy(tp, 0x1e, &tmp);
5100                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5101                 tg3_readphy(tp, 0x14, &tmp);
5102         }
5103
5104         __tg3_set_rx_mode(tp->dev);
5105
5106         /* Initialize receive rules. */
5107         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5108         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5109         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5110         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5111
5112         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
5113                 limit = 8;
5114         else
5115                 limit = 16;
5116         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5117                 limit -= 4;
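        /* The switch below clears receive rule slots (limit - 1) down
         * to 4; rules 0 and 1 were programmed above, and the clears for
         * slots 2 and 3 are commented out.  5705-class chips expose only
         * 8 rule slots versus 16 on other chips, and when ASF is enabled
         * the top 4 slots are skipped (limit -= 4), presumably because
         * the management firmware owns them.  The case fall-through is
         * intentional.
         */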
5118         switch (limit) {
5119         case 16:
5120                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5121         case 15:
5122                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5123         case 14:
5124                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5125         case 13:
5126                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5127         case 12:
5128                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5129         case 11:
5130                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5131         case 10:
5132                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5133         case 9:
5134                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5135         case 8:
5136                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5137         case 7:
5138                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5139         case 6:
5140                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5141         case 5:
5142                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5143         case 4:
5144                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5145         case 3:
5146                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5147         case 2:
5148         case 1:
5149
5150         default:
5151                 break;
5152         }
5153
5154         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5155                 tg3_enable_ints(tp);
5156
5157         return 0;
5158 }
5159
5160 /* Called at device open time to get the chip ready for
5161  * packet processing.  Invoked with tp->lock held.
5162  */
5163 static int tg3_init_hw(struct tg3 *tp)
5164 {
5165         int err;
5166
5167         /* Force the chip into D0. */
5168         err = tg3_set_power_state(tp, 0);
5169         if (err)
5170                 goto out;
5171
5172         tg3_switch_clocks(tp);
5173
5174         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5175
5176         err = tg3_reset_hw(tp);
5177
5178 out:
5179         return err;
5180 }
5181
5182 #define TG3_STAT_ADD32(PSTAT, REG) \
5183 do {    u32 __val = tr32(REG); \
5184         (PSTAT)->low += __val; \
5185         if ((PSTAT)->low < __val) \
5186                 (PSTAT)->high += 1; \
5187 } while (0)
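/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software
 * counter kept as {high,low} words (tg3_stat64_t).  The carry test relies
 * on unsigned wraparound: if the new low word is smaller than the value
 * just added, the addition overflowed, so the high word is bumped.
 * Example: low = 0xfffffff0, __val = 0x20 -> low becomes 0x10, which is
 * < 0x20, so high += 1.
 */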
5188
5189 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5190 {
5191         struct tg3_hw_stats *sp = tp->hw_stats;
5192
5193         if (!netif_carrier_ok(tp->dev))
5194                 return;
5195
5196         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5197         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5198         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5199         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5200         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5201         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5202         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5203         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5204         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5205         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5206         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5207         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5208         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5209
5210         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5211         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5212         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5213         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5214         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5215         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5216         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5217         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5218         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5219         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5220         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5221         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5222         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5223         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5224 }
5225
5226 static void tg3_timer(unsigned long __opaque)
5227 {
5228         struct tg3 *tp = (struct tg3 *) __opaque;
5229         unsigned long flags;
5230
5231         spin_lock_irqsave(&tp->lock, flags);
5232         spin_lock(&tp->tx_lock);
5233
5234         /* All of this garbage exists because, when using non-tagged
5235          * IRQ status, the mailbox/status_block protocol the chip
5236          * uses with the CPU is race prone.
5237          */
5238         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5239                 tw32(GRC_LOCAL_CTRL,
5240                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5241         } else {
5242                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5243                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5244         }
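        /* If the chip has already posted a status block update, assert
         * the interrupt line (GRC_LCLCTRL_SETINT) so the ISR runs and
         * consumes it; otherwise kick the coalescing engine
         * (HOSTCC_MODE_NOW) to generate a fresh update.  Either way an
         * update should not stay stranded for more than one timer tick.
         */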
5245
5246         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5247                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5248                 spin_unlock(&tp->tx_lock);
5249                 spin_unlock_irqrestore(&tp->lock, flags);
5250                 schedule_work(&tp->reset_task);
5251                 return;
5252         }
5253
5254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
5255                 tg3_periodic_fetch_stats(tp);
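        /* Only the 5705 needs this: on the other chips the statistics
         * block is DMA'd into host memory by the chip itself (see the
         * HOSTCC_STATS_BLK_HOST_ADDR setup in tg3_reset_hw), whereas on
         * the 5705 the MAC counters are polled here and accumulated in
         * software.
         */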
5256
5257         /* This part only runs once per second. */
5258         if (!--tp->timer_counter) {
5259                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5260                         u32 mac_stat;
5261                         int phy_event;
5262
5263                         mac_stat = tr32(MAC_STATUS);
5264
5265                         phy_event = 0;
5266                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5267                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5268                                         phy_event = 1;
5269                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5270                                 phy_event = 1;
5271
5272                         if (phy_event)
5273                                 tg3_setup_phy(tp, 0);
5274                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5275                         u32 mac_stat = tr32(MAC_STATUS);
5276                         int need_setup = 0;
5277
5278                         if (netif_carrier_ok(tp->dev) &&
5279                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5280                                 need_setup = 1;
5281                         }
5282                         if (!netif_carrier_ok(tp->dev) &&
5283                             (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5284                                 need_setup = 1;
5285                         }
5286                         if (need_setup) {
5287                                 tw32_f(MAC_MODE,
5288                                      (tp->mac_mode &
5289                                       ~MAC_MODE_PORT_MODE_MASK));
5290                                 udelay(40);
5291                                 tw32_f(MAC_MODE, tp->mac_mode);
5292                                 udelay(40);
5293                                 tg3_setup_phy(tp, 0);
5294                         }
5295                 }
5296
5297                 tp->timer_counter = tp->timer_multiplier;
5298         }
5299
5300         /* Heartbeat is only sent once every 120 seconds.  */
5301         if (!--tp->asf_counter) {
5302                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5303                         u32 val;
5304
5305                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5306                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5307                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5308                         val = tr32(GRC_RX_CPU_EVENT);
5309                         val |= (1 << 14);
5310                         tw32(GRC_RX_CPU_EVENT, val);
5311                 }
5312                 tp->asf_counter = tp->asf_multiplier;
5313         }
5314
5315         spin_unlock(&tp->tx_lock);
5316         spin_unlock_irqrestore(&tp->lock, flags);
5317
5318         tp->timer.expires = jiffies + tp->timer_offset;
5319         add_timer(&tp->timer);
5320 }
5321
5322 static int tg3_open(struct net_device *dev)
5323 {
5324         struct tg3 *tp = dev->priv;
5325         int err;
5326
5327         spin_lock_irq(&tp->lock);
5328         spin_lock(&tp->tx_lock);
5329
5330         tg3_disable_ints(tp);
5331         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5332
5333         spin_unlock(&tp->tx_lock);
5334         spin_unlock_irq(&tp->lock);
5335
5336         /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5337          * tp->tg3_flags is still accurate at the new location.
5338          */
5339         err = tg3_alloc_consistent(tp);
5340         if (err)
5341                 return err;
5342
5343         err = request_irq(dev->irq, tg3_interrupt,
5344                           SA_SHIRQ, dev->name, dev);
5345
5346         if (err) {
5347                 tg3_free_consistent(tp);
5348                 return err;
5349         }
5350
5351         spin_lock_irq(&tp->lock);
5352         spin_lock(&tp->tx_lock);
5353
5354         err = tg3_init_hw(tp);
5355         if (err) {
5356                 tg3_halt(tp);
5357                 tg3_free_rings(tp);
5358         } else {
5359                 tp->timer_offset = HZ / 10;
5360                 tp->timer_counter = tp->timer_multiplier = 10;
5361                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
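                /* timer_offset of HZ / 10 makes tg3_timer run every
                 * 100ms.  With timer_multiplier == 10 the once-per-second
                 * branch in tg3_timer runs every 10 ticks, and with
                 * asf_multiplier == 10 * 120 == 1200 ticks the ASF
                 * heartbeat goes out every 120 seconds, matching the
                 * comments in tg3_timer.
                 */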
5362
5363                 init_timer(&tp->timer);
5364                 tp->timer.expires = jiffies + tp->timer_offset;
5365                 tp->timer.data = (unsigned long) tp;
5366                 tp->timer.function = tg3_timer;
5367                 add_timer(&tp->timer);
5368
5369                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5370         }
5371
5372         spin_unlock(&tp->tx_lock);
5373         spin_unlock_irq(&tp->lock);
5374
5375         if (err) {
5376                 free_irq(dev->irq, dev);
5377                 tg3_free_consistent(tp);
5378                 return err;
5379         }
5380
5381         spin_lock_irq(&tp->lock);
5382         spin_lock(&tp->tx_lock);
5383
5384         tg3_enable_ints(tp);
5385
5386         spin_unlock(&tp->tx_lock);
5387         spin_unlock_irq(&tp->lock);
5388
5389         netif_start_queue(dev);
5390
5391         return 0;
5392 }
5393
5394 #if 0
5395 /*static*/ void tg3_dump_state(struct tg3 *tp)
5396 {
5397         u32 val32, val32_2, val32_3, val32_4, val32_5;
5398         u16 val16;
5399         int i;
5400
5401         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5402         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5403         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5404                val16, val32);
5405
5406         /* MAC block */
5407         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5408                tr32(MAC_MODE), tr32(MAC_STATUS));
5409         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5410                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5411         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5412                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5413         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5414                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5415
5416         /* Send data initiator control block */
5417         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5418                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5419         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5420                tr32(SNDDATAI_STATSCTRL));
5421
5422         /* Send data completion control block */
5423         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5424
5425         /* Send BD ring selector block */
5426         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5427                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5428
5429         /* Send BD initiator control block */
5430         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5431                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5432
5433         /* Send BD completion control block */
5434         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5435
5436         /* Receive list placement control block */
5437         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5438                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5439         printk("       RCVLPC_STATSCTRL[%08x]\n",
5440                tr32(RCVLPC_STATSCTRL));
5441
5442         /* Receive data and receive BD initiator control block */
5443         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5444                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5445
5446         /* Receive data completion control block */
5447         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5448                tr32(RCVDCC_MODE));
5449
5450         /* Receive BD initiator control block */
5451         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5452                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5453
5454         /* Receive BD completion control block */
5455         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5456                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5457
5458         /* Receive list selector control block */
5459         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5460                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5461
5462         /* Mbuf cluster free block */
5463         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5464                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5465
5466         /* Host coalescing control block */
5467         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5468                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5469         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5470                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5471                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5472         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5473                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5474                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5475         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5476                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5477         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5478                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5479
5480         /* Memory arbiter control block */
5481         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5482                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5483
5484         /* Buffer manager control block */
5485         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5486                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5487         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5488                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5489         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5490                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5491                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5492                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5493
5494         /* Read DMA control block */
5495         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5496                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5497
5498         /* Write DMA control block */
5499         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5500                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5501
5502         /* DMA completion block */
5503         printk("DEBUG: DMAC_MODE[%08x]\n",
5504                tr32(DMAC_MODE));
5505
5506         /* GRC block */
5507         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5508                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5509         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5510                tr32(GRC_LOCAL_CTRL));
5511
5512         /* TG3_BDINFOs */
5513         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5514                tr32(RCVDBDI_JUMBO_BD + 0x0),
5515                tr32(RCVDBDI_JUMBO_BD + 0x4),
5516                tr32(RCVDBDI_JUMBO_BD + 0x8),
5517                tr32(RCVDBDI_JUMBO_BD + 0xc));
5518         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5519                tr32(RCVDBDI_STD_BD + 0x0),
5520                tr32(RCVDBDI_STD_BD + 0x4),
5521                tr32(RCVDBDI_STD_BD + 0x8),
5522                tr32(RCVDBDI_STD_BD + 0xc));
5523         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5524                tr32(RCVDBDI_MINI_BD + 0x0),
5525                tr32(RCVDBDI_MINI_BD + 0x4),
5526                tr32(RCVDBDI_MINI_BD + 0x8),
5527                tr32(RCVDBDI_MINI_BD + 0xc));
5528
5529         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5530         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5531         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5532         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5533         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5534                val32, val32_2, val32_3, val32_4);
5535
5536         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5537         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5538         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5539         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5540         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5541                val32, val32_2, val32_3, val32_4);
5542
5543         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5544         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5545         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5546         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5547         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5548         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5549                val32, val32_2, val32_3, val32_4, val32_5);
5550
5551         /* SW status block */
5552         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5553                tp->hw_status->status,
5554                tp->hw_status->status_tag,
5555                tp->hw_status->rx_jumbo_consumer,
5556                tp->hw_status->rx_consumer,
5557                tp->hw_status->rx_mini_consumer,
5558                tp->hw_status->idx[0].rx_producer,
5559                tp->hw_status->idx[0].tx_consumer);
5560
5561         /* SW statistics block */
5562         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5563                ((u32 *)tp->hw_stats)[0],
5564                ((u32 *)tp->hw_stats)[1],
5565                ((u32 *)tp->hw_stats)[2],
5566                ((u32 *)tp->hw_stats)[3]);
5567
5568         /* Mailboxes */
5569         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5570                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5571                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5572                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5573                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5574
5575         /* NIC side send descriptors. */
5576         for (i = 0; i < 6; i++) {
5577                 unsigned long txd;
5578
5579                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5580                         + (i * sizeof(struct tg3_tx_buffer_desc));
5581                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5582                        i,
5583                        readl(txd + 0x0), readl(txd + 0x4),
5584                        readl(txd + 0x8), readl(txd + 0xc));
5585         }
5586
5587         /* NIC side RX descriptors. */
5588         for (i = 0; i < 6; i++) {
5589                 unsigned long rxd;
5590
5591                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5592                         + (i * sizeof(struct tg3_rx_buffer_desc));
5593                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5594                        i,
5595                        readl(rxd + 0x0), readl(rxd + 0x4),
5596                        readl(rxd + 0x8), readl(rxd + 0xc));
5597                 rxd += (4 * sizeof(u32));
5598                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5599                        i,
5600                        readl(rxd + 0x0), readl(rxd + 0x4),
5601                        readl(rxd + 0x8), readl(rxd + 0xc));
5602         }
5603
5604         for (i = 0; i < 6; i++) {
5605                 unsigned long rxd;
5606
5607                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5608                         + (i * sizeof(struct tg3_rx_buffer_desc));
5609                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5610                        i,
5611                        readl(rxd + 0x0), readl(rxd + 0x4),
5612                        readl(rxd + 0x8), readl(rxd + 0xc));
5613                 rxd += (4 * sizeof(u32));
5614                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5615                        i,
5616                        readl(rxd + 0x0), readl(rxd + 0x4),
5617                        readl(rxd + 0x8), readl(rxd + 0xc));
5618         }
5619 }
5620 #endif
5621
5622 static struct net_device_stats *tg3_get_stats(struct net_device *);
5623 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
5624
5625 static int tg3_close(struct net_device *dev)
5626 {
5627         struct tg3 *tp = dev->priv;
5628
5629         netif_stop_queue(dev);
5630
5631         del_timer_sync(&tp->timer);
5632
5633         spin_lock_irq(&tp->lock);
5634         spin_lock(&tp->tx_lock);
5635 #if 0
5636         tg3_dump_state(tp);
5637 #endif
5638
5639         tg3_disable_ints(tp);
5640
5641         tg3_halt(tp);
5642         tg3_free_rings(tp);
5643         tp->tg3_flags &=
5644                 ~(TG3_FLAG_INIT_COMPLETE |
5645                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5646         netif_carrier_off(tp->dev);
5647
5648         spin_unlock(&tp->tx_lock);
5649         spin_unlock_irq(&tp->lock);
5650
5651         free_irq(dev->irq, dev);
5652
5653         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5654                sizeof(tp->net_stats_prev));
5655         memcpy(&tp->estats_prev, tg3_get_estats(tp),
5656                sizeof(tp->estats_prev));
5657
5658         tg3_free_consistent(tp);
5659
5660         return 0;
5661 }
5662
5663 static inline unsigned long get_stat64(tg3_stat64_t *val)
5664 {
5665         unsigned long ret;
5666
5667 #if (BITS_PER_LONG == 32)
5668         ret = val->low;
5669 #else
5670         ret = ((u64)val->high << 32) | ((u64)val->low);
5671 #endif
5672         return ret;
5673 }
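/* Note: on 32-bit platforms struct net_device_stats only holds unsigned
 * long values, so get_stat64() deliberately truncates to the low 32 bits
 * there; on 64-bit platforms the full high:low value is returned.
 */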
5674
5675 static unsigned long calc_crc_errors(struct tg3 *tp)
5676 {
5677         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5678
5679         if (tp->phy_id != PHY_ID_SERDES &&
5680             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5681              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5682                 unsigned long flags;
5683                 u32 val;
5684
5685                 spin_lock_irqsave(&tp->lock, flags);
5686                 tg3_readphy(tp, 0x1e, &val);
5687                 tg3_writephy(tp, 0x1e, val | 0x8000);
5688                 tg3_readphy(tp, 0x14, &val);
5689                 spin_unlock_irqrestore(&tp->lock, flags);
5690
5691                 tp->phy_crc_errors += val;
5692
5693                 return tp->phy_crc_errors;
5694         }
5695
5696         return get_stat64(&hw_stats->rx_fcs_errors);
5697 }
5698
5699 #define ESTAT_ADD(member) \
5700         estats->member =        old_estats->member + \
5701                                 get_stat64(&hw_stats->member)
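/* ESTAT_ADD() reports old_estats->member plus the live hardware counter.
 * tg3_close() snapshots the running totals into tp->estats_prev (and
 * tp->net_stats_prev) before the statistics memory is freed, so the
 * ethtool and netdev counters keep accumulating across an ifdown/ifup
 * cycle instead of restarting from zero.
 */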
5702
5703 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
5704 {
5705         struct tg3_ethtool_stats *estats = &tp->estats;
5706         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
5707         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5708
5709         if (!hw_stats)
5710                 return old_estats;
5711
5712         ESTAT_ADD(rx_fragments);
5713         ESTAT_ADD(rx_ucast_packets);
5714         ESTAT_ADD(rx_bcast_packets);
5715         ESTAT_ADD(rx_fcs_errors);
5716         ESTAT_ADD(rx_xon_pause_rcvd);
5717         ESTAT_ADD(rx_xoff_pause_rcvd);
5718         ESTAT_ADD(rx_mac_ctrl_rcvd);
5719         ESTAT_ADD(rx_xoff_entered);
5720         ESTAT_ADD(rx_frame_too_long_errors);
5721         ESTAT_ADD(rx_jabbers);
5722         ESTAT_ADD(rx_undersize_packets);
5723         ESTAT_ADD(rx_in_length_errors);
5724         ESTAT_ADD(rx_out_length_errors);
5725
5726         ESTAT_ADD(tx_xon_sent);
5727         ESTAT_ADD(tx_xoff_sent);
5728         ESTAT_ADD(tx_flow_control);
5729         ESTAT_ADD(tx_mac_errors);
5730         ESTAT_ADD(tx_single_collisions);
5731         ESTAT_ADD(tx_mult_collisions);
5732         ESTAT_ADD(tx_deferred);
5733         ESTAT_ADD(tx_excessive_collisions);
5734         ESTAT_ADD(tx_late_collisions);
5735         ESTAT_ADD(tx_ucast_packets);
5736         ESTAT_ADD(tx_mcast_packets);
5737         ESTAT_ADD(tx_bcast_packets);
5738
5739         return estats;
5740 }
5741
5742 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
5743 {
5744         struct tg3 *tp = dev->priv;
5745         struct net_device_stats *stats = &tp->net_stats;
5746         struct net_device_stats *old_stats = &tp->net_stats_prev;
5747         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5748
5749         if (!hw_stats)
5750                 return old_stats;
5751
5752         stats->rx_packets = old_stats->rx_packets +
5753                 get_stat64(&hw_stats->rx_ucast_packets) +
5754                 get_stat64(&hw_stats->rx_mcast_packets) +
5755                 get_stat64(&hw_stats->rx_bcast_packets);
5756                 
5757         stats->tx_packets = old_stats->tx_packets +
5758                 get_stat64(&hw_stats->tx_ucast_packets) +
5759                 get_stat64(&hw_stats->tx_mcast_packets) +
5760                 get_stat64(&hw_stats->tx_bcast_packets);
5761
5762         stats->rx_bytes = old_stats->rx_bytes +
5763                 get_stat64(&hw_stats->rx_octets);
5764         stats->tx_bytes = old_stats->tx_bytes +
5765                 get_stat64(&hw_stats->tx_octets);
5766
5767         stats->rx_errors = old_stats->rx_errors +
5768                 get_stat64(&hw_stats->rx_errors);
5769         stats->tx_errors = old_stats->tx_errors +
5770                 get_stat64(&hw_stats->tx_errors) +
5771                 get_stat64(&hw_stats->tx_mac_errors) +
5772                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
5773                 get_stat64(&hw_stats->tx_discards);
5774
5775         stats->multicast = old_stats->multicast +
5776                 get_stat64(&hw_stats->rx_mcast_packets);
5777         stats->collisions = old_stats->collisions +
5778                 get_stat64(&hw_stats->tx_collisions);
5779
5780         stats->rx_length_errors = old_stats->rx_length_errors +
5781                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
5782                 get_stat64(&hw_stats->rx_undersize_packets);
5783
5784         stats->rx_over_errors = old_stats->rx_over_errors +
5785                 get_stat64(&hw_stats->rxbds_empty);
5786         stats->rx_frame_errors = old_stats->rx_frame_errors +
5787                 get_stat64(&hw_stats->rx_align_errors);
5788         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
5789                 get_stat64(&hw_stats->tx_discards);
5790         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
5791                 get_stat64(&hw_stats->tx_carrier_sense_errors);
5792
5793         stats->rx_crc_errors = old_stats->rx_crc_errors +
5794                 calc_crc_errors(tp);
5795
5796         return stats;
5797 }
5798
5799 static inline u32 calc_crc(unsigned char *buf, int len)
5800 {
5801         u32 reg;
5802         u32 tmp;
5803         int j, k;
5804
5805         reg = 0xffffffff;
5806
5807         for (j = 0; j < len; j++) {
5808                 reg ^= buf[j];
5809
5810                 for (k = 0; k < 8; k++) {
5811                         tmp = reg & 0x01;
5812
5813                         reg >>= 1;
5814
5815                         if (tmp) {
5816                                 reg ^= 0xedb88320;
5817                         }
5818                 }
5819         }
5820
5821         return ~reg;
5822 }
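/* calc_crc() is the standard bit-by-bit Ethernet CRC-32 (reflected
 * polynomial 0xedb88320, initial value 0xffffffff, final complement).
 * __tg3_set_rx_mode() below runs it over each multicast MAC address to
 * pick a bit in the hardware multicast hash filter.
 */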
5823
5824 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
5825 {
5826         /* accept or reject all multicast frames */
5827         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
5828         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
5829         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
5830         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
5831 }
5832
5833 static void __tg3_set_rx_mode(struct net_device *dev)
5834 {
5835         struct tg3 *tp = dev->priv;
5836         u32 rx_mode;
5837
5838         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
5839                                   RX_MODE_KEEP_VLAN_TAG);
5840
5841         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
5842          * flag clear.
5843          */
5844 #if TG3_VLAN_TAG_USED
5845         if (!tp->vlgrp &&
5846             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
5847                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
5848 #else
5849         /* By definition, VLAN is always disabled in this
5850          * case.
5851          */
5852         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
5853                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
5854 #endif
5855
5856         if (dev->flags & IFF_PROMISC) {
5857                 /* Promiscuous mode. */
5858                 rx_mode |= RX_MODE_PROMISC;
5859         } else if (dev->flags & IFF_ALLMULTI) {
5860                 /* Accept all multicast. */
5861                 tg3_set_multi (tp, 1);
5862         } else if (dev->mc_count < 1) {
5863                 /* Reject all multicast. */
5864                 tg3_set_multi (tp, 0);
5865         } else {
5866                 /* Accept one or more multicast(s). */
5867                 struct dev_mc_list *mclist;
5868                 unsigned int i;
5869                 u32 mc_filter[4] = { 0, };
5870                 u32 regidx;
5871                 u32 bit;
5872                 u32 crc;
5873
5874                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
5875                      i++, mclist = mclist->next) {
5876
5877                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
5878                         bit = ~crc & 0x7f;
5879                         regidx = (bit & 0x60) >> 5;
5880                         bit &= 0x1f;
5881                         mc_filter[regidx] |= (1 << bit);
5882                 }
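                /* The loop above maps each multicast address into a
                 * 128-bit hash filter spread over the four 32-bit
                 * MAC_HASH_REG_* registers: the low 7 bits of the
                 * inverted CRC select the filter bit, with bits 6:5
                 * choosing the register (regidx) and bits 4:0 the bit
                 * within it.
                 */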
5883
5884                 tw32(MAC_HASH_REG_0, mc_filter[0]);
5885                 tw32(MAC_HASH_REG_1, mc_filter[1]);
5886                 tw32(MAC_HASH_REG_2, mc_filter[2]);
5887                 tw32(MAC_HASH_REG_3, mc_filter[3]);
5888         }
5889
5890         if (rx_mode != tp->rx_mode) {
5891                 tp->rx_mode = rx_mode;
5892                 tw32_f(MAC_RX_MODE, rx_mode);
5893                 udelay(10);
5894         }
5895 }
5896
5897 static void tg3_set_rx_mode(struct net_device *dev)
5898 {
5899         struct tg3 *tp = dev->priv;
5900
5901         spin_lock_irq(&tp->lock);
5902         __tg3_set_rx_mode(dev);
5903         spin_unlock_irq(&tp->lock);
5904 }
5905
5906 #define TG3_REGDUMP_LEN         (32 * 1024)
5907
5908 static int tg3_get_regs_len(struct net_device *dev)
5909 {
5910         return TG3_REGDUMP_LEN;
5911 }
5912
5913 static void tg3_get_regs(struct net_device *dev,
5914                 struct ethtool_regs *regs, void *_p)
5915 {
5916         u32 *p = _p;
5917         struct tg3 *tp = dev->priv;
5918         u8 *orig_p = _p;
5919         int i;
5920
5921         regs->version = 0;
5922
5923         memset(p, 0, TG3_REGDUMP_LEN);
5924
5925         spin_lock_irq(&tp->lock);
5926         spin_lock(&tp->tx_lock);
5927
5928 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
5929 #define GET_REG32_LOOP(base,len)                \
5930 do {    p = (u32 *)(orig_p + (base));           \
5931         for (i = 0; i < len; i += 4)            \
5932                 __GET_REG32((base) + i);        \
5933 } while (0)
5934 #define GET_REG32_1(reg)                        \
5935 do {    p = (u32 *)(orig_p + (reg));            \
5936         __GET_REG32((reg));                     \
5937 } while (0)
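/* The helpers above copy each register into the dump buffer at the same
 * offset it occupies in the register space (orig_p + base), so the gaps
 * between the dumped ranges stay zero-filled from the memset above and
 * the 32k dump lines up with the hardware register map.
 */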
5938
5939         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
5940         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
5941         GET_REG32_LOOP(MAC_MODE, 0x4f0);
5942         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
5943         GET_REG32_1(SNDDATAC_MODE);
5944         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
5945         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
5946         GET_REG32_1(SNDBDC_MODE);
5947         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
5948         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
5949         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
5950         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
5951         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
5952         GET_REG32_1(RCVDCC_MODE);
5953         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
5954         GET_REG32_LOOP(RCVCC_MODE, 0x14);
5955         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
5956         GET_REG32_1(MBFREE_MODE);
5957         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
5958         GET_REG32_LOOP(MEMARB_MODE, 0x10);
5959         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
5960         GET_REG32_LOOP(RDMAC_MODE, 0x08);
5961         GET_REG32_LOOP(WDMAC_MODE, 0x08);
5962         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
5963         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
5964         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
5965         GET_REG32_LOOP(FTQ_RESET, 0x120);
5966         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
5967         GET_REG32_1(DMAC_MODE);
5968         GET_REG32_LOOP(GRC_MODE, 0x4c);
5969         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5970                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
5971
5972 #undef __GET_REG32
5973 #undef GET_REG32_LOOP
5974 #undef GET_REG32_1
5975
5976         spin_unlock(&tp->tx_lock);
5977         spin_unlock_irq(&tp->lock);
5978 }
5979
5980 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5981 {
5982         struct tg3 *tp = dev->priv;
5983   
5984         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
5985                                         tp->link_config.phy_is_low_power)
5986                 return -EAGAIN;
5987
5988         cmd->supported = (SUPPORTED_Autoneg);
5989
5990         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
5991                 cmd->supported |= (SUPPORTED_1000baseT_Half |
5992                                    SUPPORTED_1000baseT_Full);
5993
5994         if (tp->phy_id != PHY_ID_SERDES)
5995                 cmd->supported |= (SUPPORTED_100baseT_Half |
5996                                   SUPPORTED_100baseT_Full |
5997                                   SUPPORTED_10baseT_Half |
5998                                   SUPPORTED_10baseT_Full |
5999                                   SUPPORTED_MII);
6000         else
6001                 cmd->supported |= SUPPORTED_FIBRE;
6002   
6003         cmd->advertising = tp->link_config.advertising;
6004         cmd->speed = tp->link_config.active_speed;
6005         cmd->duplex = tp->link_config.active_duplex;
6006         cmd->port = 0;
6007         cmd->phy_address = PHY_ADDR;
6008         cmd->transceiver = 0;
6009         cmd->autoneg = tp->link_config.autoneg;
6010         cmd->maxtxpkt = 0;
6011         cmd->maxrxpkt = 0;
6012         return 0;
6013 }
6014   
6015 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6016 {
6017         struct tg3 *tp = dev->priv;
6018   
6019         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
6020             tp->link_config.phy_is_low_power)
6021                 return -EAGAIN;
6022
6023         if (tp->phy_id == PHY_ID_SERDES) {
6024                 /* These are the only valid advertisement bits allowed.  */
6025                 if (cmd->autoneg == AUTONEG_ENABLE &&
6026                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6027                                           ADVERTISED_1000baseT_Full |
6028                                           ADVERTISED_Autoneg |
6029                                           ADVERTISED_FIBRE)))
6030                         return -EINVAL;
6031         }
6032
6033         spin_lock_irq(&tp->lock);
6034         spin_lock(&tp->tx_lock);
6035
6036         tp->link_config.autoneg = cmd->autoneg;
6037         if (cmd->autoneg == AUTONEG_ENABLE) {
6038                 tp->link_config.advertising = cmd->advertising;
6039                 tp->link_config.speed = SPEED_INVALID;
6040                 tp->link_config.duplex = DUPLEX_INVALID;
6041         } else {
6042                 tp->link_config.advertising = 0;
6043                 tp->link_config.speed = cmd->speed;
6044                 tp->link_config.duplex = cmd->duplex;
6045         }
6046   
6047         tg3_setup_phy(tp, 1);
6048         spin_unlock(&tp->tx_lock);
6049         spin_unlock_irq(&tp->lock);
6050   
6051         return 0;
6052 }
6053   
6054 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6055 {
6056         struct tg3 *tp = dev->priv;
6057   
6058         strcpy(info->driver, DRV_MODULE_NAME);
6059         strcpy(info->version, DRV_MODULE_VERSION);
6060         strcpy(info->bus_info, pci_name(tp->pdev));
6061 }
6062   
6063 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6064 {
6065         struct tg3 *tp = dev->priv;
6066   
6067         wol->supported = WAKE_MAGIC;
6068         wol->wolopts = 0;
6069         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6070                 wol->wolopts = WAKE_MAGIC;
6071         memset(&wol->sopass, 0, sizeof(wol->sopass));
6072 }
6073   
6074 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6075 {
6076         struct tg3 *tp = dev->priv;
6077   
6078         if (wol->wolopts & ~WAKE_MAGIC)
6079                 return -EINVAL;
6080         if ((wol->wolopts & WAKE_MAGIC) &&
6081             tp->phy_id == PHY_ID_SERDES &&
6082             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6083                 return -EINVAL;
6084   
6085         spin_lock_irq(&tp->lock);
6086         if (wol->wolopts & WAKE_MAGIC)
6087                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6088         else
6089                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6090         spin_unlock_irq(&tp->lock);
6091   
6092         return 0;
6093 }
6094   
6095 static u32 tg3_get_msglevel(struct net_device *dev)
6096 {
6097         struct tg3 *tp = dev->priv;
6098         return tp->msg_enable;
6099 }
6100   
6101 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6102 {
6103         struct tg3 *tp = dev->priv;
6104         tp->msg_enable = value;
6105 }
6106   
6107 #if TG3_TSO_SUPPORT != 0
6108 static int tg3_set_tso(struct net_device *dev, u32 value)
6109 {
6110         struct tg3 *tp = dev->priv;
6111
6112         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6113                 if (value)
6114                         return -EINVAL;
6115                 return 0;
6116         }
6117         return ethtool_op_set_tso(dev, value);
6118 }
6119 #endif
6120   
6121 static int tg3_nway_reset(struct net_device *dev)
6122 {
6123         struct tg3 *tp = dev->priv;
6124         u32 bmcr;
6125         int r;
6126   
6127         spin_lock_irq(&tp->lock);
6128         tg3_readphy(tp, MII_BMCR, &bmcr);
6129         tg3_readphy(tp, MII_BMCR, &bmcr);
6130         r = -EINVAL;
6131         if (bmcr & BMCR_ANENABLE) {
6132                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6133                 r = 0;
6134         }
6135         spin_unlock_irq(&tp->lock);
6136   
6137         return r;
6138 }
6139   
6140 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6141 {
6142         struct tg3 *tp = dev->priv;
6143   
6144         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6145         ering->rx_mini_max_pending = 0;
6146         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6147
6148         ering->rx_pending = tp->rx_pending;
6149         ering->rx_mini_pending = 0;
6150         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6151         ering->tx_pending = tp->tx_pending;
6152 }
6153   
6154 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6155 {
6156         struct tg3 *tp = dev->priv;
6157   
6158         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6159             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6160             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6161                 return -EINVAL;
6162   
6163         tg3_netif_stop(tp);
6164         spin_lock_irq(&tp->lock);
6165         spin_lock(&tp->tx_lock);
6166   
6167         tp->rx_pending = ering->rx_pending;
6168
6169         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6170             tp->rx_pending > 63)
6171                 tp->rx_pending = 63;
6172         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6173         tp->tx_pending = ering->tx_pending;
6174
6175         tg3_halt(tp);
6176         tg3_init_hw(tp);
6177         netif_wake_queue(tp->dev);
6178         spin_unlock(&tp->tx_lock);
6179         spin_unlock_irq(&tp->lock);
6180         tg3_netif_start(tp);
6181   
6182         return 0;
6183 }
6184   
6185 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6186 {
6187         struct tg3 *tp = dev->priv;
6188   
6189         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6190         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6191         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
6192 }
6193   
6194 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6195 {
6196         struct tg3 *tp = dev->priv;
6197   
6198         tg3_netif_stop(tp);
6199         spin_lock_irq(&tp->lock);
6200         spin_lock(&tp->tx_lock);
6201         if (epause->autoneg)
6202                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6203         else
6204                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6205         if (epause->rx_pause)
6206                 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6207         else
6208                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6209         if (epause->tx_pause)
6210                 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6211         else
6212                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6213         tg3_halt(tp);
6214         tg3_init_hw(tp);
6215         spin_unlock(&tp->tx_lock);
6216         spin_unlock_irq(&tp->lock);
6217         tg3_netif_start(tp);
6218   
6219         return 0;
6220 }
6221   
6222 static u32 tg3_get_rx_csum(struct net_device *dev)
6223 {
6224         struct tg3 *tp = dev->priv;
6225         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6226 }
6227   
6228 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6229 {
6230         struct tg3 *tp = dev->priv;
6231   
6232         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6233                 if (data != 0)
6234                         return -EINVAL;
6235                 return 0;
6236         }
6237   
6238         spin_lock_irq(&tp->lock);
6239         if (data)
6240                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6241         else
6242                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6243         spin_unlock_irq(&tp->lock);
6244   
6245         return 0;
6246 }
6247   
6248 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6249 {
6250         struct tg3 *tp = dev->priv;
6251   
6252         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6253                 if (data != 0)
6254                         return -EINVAL;
6255                 return 0;
6256         }
6257   
6258         if (data)
6259                 dev->features |= NETIF_F_IP_CSUM;
6260         else
6261                 dev->features &= ~NETIF_F_IP_CSUM;
6262
6263         return 0;
6264 }
6265
6266 static int tg3_get_stats_count (struct net_device *dev)
6267 {
6268         return TG3_NUM_STATS;
6269 }
6270
6271 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6272 {
6273         switch (stringset) {
6274         case ETH_SS_STATS:
6275                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6276                 break;
6277         default:
6278                 WARN_ON(1);     /* we need a WARN() */
6279                 break;
6280         }
6281 }
6282
6283 static void tg3_get_ethtool_stats (struct net_device *dev,
6284                                    struct ethtool_stats *estats, u64 *tmp_stats)
6285 {
6286         struct tg3 *tp = dev->priv;
6287         memcpy(tmp_stats, &tp->estats, sizeof(tp->estats));
6288 }
6289
6290 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6291 {
6292         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
6293         struct tg3 *tp = dev->priv;
6294         int err;
6295
6296         switch(cmd) {
6297         case SIOCGMIIPHY:
6298                 data->phy_id = PHY_ADDR;
6299
6300                 /* fallthru */
6301         case SIOCGMIIREG: {
6302                 u32 mii_regval;
6303
6304                 spin_lock_irq(&tp->lock);
6305                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6306                 spin_unlock_irq(&tp->lock);
6307
6308                 data->val_out = mii_regval;
6309
6310                 return err;
6311         }
6312
6313         case SIOCSMIIREG:
6314                 if (!capable(CAP_NET_ADMIN))
6315                         return -EPERM;
6316
6317                 spin_lock_irq(&tp->lock);
6318                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6319                 spin_unlock_irq(&tp->lock);
6320
6321                 return err;
6322
6323         default:
6324                 /* do nothing */
6325                 break;
6326         }
6327         return -EOPNOTSUPP;
6328 }
6329
6330 #if TG3_VLAN_TAG_USED
6331 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6332 {
6333         struct tg3 *tp = dev->priv;
6334
6335         spin_lock_irq(&tp->lock);
6336         spin_lock(&tp->tx_lock);
6337
6338         tp->vlgrp = grp;
6339
6340         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6341         __tg3_set_rx_mode(dev);
6342
6343         spin_unlock(&tp->tx_lock);
6344         spin_unlock_irq(&tp->lock);
6345 }
6346
6347 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6348 {
6349         struct tg3 *tp = dev->priv;
6350
6351         spin_lock_irq(&tp->lock);
6352         spin_lock(&tp->tx_lock);
6353         if (tp->vlgrp)
6354                 tp->vlgrp->vlan_devices[vid] = NULL;
6355         spin_unlock(&tp->tx_lock);
6356         spin_unlock_irq(&tp->lock);
6357 }
6358 #endif
6359
6360 static struct ethtool_ops tg3_ethtool_ops = {
6361         .get_settings           = tg3_get_settings,
6362         .set_settings           = tg3_set_settings,
6363         .get_drvinfo            = tg3_get_drvinfo,
6364         .get_regs_len           = tg3_get_regs_len,
6365         .get_regs               = tg3_get_regs,
6366         .get_wol                = tg3_get_wol,
6367         .set_wol                = tg3_set_wol,
6368         .get_msglevel           = tg3_get_msglevel,
6369         .set_msglevel           = tg3_set_msglevel,
6370         .nway_reset             = tg3_nway_reset,
6371         .get_link               = ethtool_op_get_link,
6372         .get_ringparam          = tg3_get_ringparam,
6373         .set_ringparam          = tg3_set_ringparam,
6374         .get_pauseparam         = tg3_get_pauseparam,
6375         .set_pauseparam         = tg3_set_pauseparam,
6376         .get_rx_csum            = tg3_get_rx_csum,
6377         .set_rx_csum            = tg3_set_rx_csum,
6378         .get_tx_csum            = ethtool_op_get_tx_csum,
6379         .set_tx_csum            = tg3_set_tx_csum,
6380         .get_sg                 = ethtool_op_get_sg,
6381         .set_sg                 = ethtool_op_set_sg,
6382 #if TG3_TSO_SUPPORT != 0
6383         .get_tso                = ethtool_op_get_tso,
6384         .set_tso                = tg3_set_tso,
6385 #endif
6386         .get_strings            = tg3_get_strings,
6387         .get_stats_count        = tg3_get_stats_count,
6388         .get_ethtool_stats      = tg3_get_ethtool_stats,
6389 };
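/* These operations are reached through the generic ethtool ioctl path;
 * for example "ethtool -S ethX" uses .get_stats_count/.get_strings/
 * .get_ethtool_stats above, and "ethtool -r ethX" ends up in .nway_reset.
 */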
6390
6391 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6392 static void __devinit tg3_nvram_init(struct tg3 *tp)
6393 {
6394         int j;
6395
6396         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6397                 return;
6398
6399         tw32_f(GRC_EEPROM_ADDR,
6400              (EEPROM_ADDR_FSM_RESET |
6401               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6402                EEPROM_ADDR_CLKPERD_SHIFT)));
6403
6404         /* XXX schedule_timeout() ... */
6405         for (j = 0; j < 100; j++)
6406                 udelay(10);
6407
6408         /* Enable seeprom accesses. */
6409         tw32_f(GRC_LOCAL_CTRL,
6410              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6411         udelay(100);
6412
6413         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6414             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6415                 u32 nvcfg1 = tr32(NVRAM_CFG1);
6416
6417                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6418                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6419                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6420                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6421                 } else {
6422                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6423                         tw32(NVRAM_CFG1, nvcfg1);
6424                 }
6425
6426         } else {
6427                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6428         }
6429 }
6430
6431 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6432                                                  u32 offset, u32 *val)
6433 {
6434         u32 tmp;
6435         int i;
6436
6437         if (offset > EEPROM_ADDR_ADDR_MASK ||
6438             (offset % 4) != 0)
6439                 return -EINVAL;
6440
6441         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6442                                         EEPROM_ADDR_DEVID_MASK |
6443                                         EEPROM_ADDR_READ);
6444         tw32(GRC_EEPROM_ADDR,
6445              tmp |
6446              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6447              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6448               EEPROM_ADDR_ADDR_MASK) |
6449              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6450
6451         for (i = 0; i < 10000; i++) {
6452                 tmp = tr32(GRC_EEPROM_ADDR);
6453
6454                 if (tmp & EEPROM_ADDR_COMPLETE)
6455                         break;
6456                 udelay(100);
6457         }
6458         if (!(tmp & EEPROM_ADDR_COMPLETE))
6459                 return -EBUSY;
6460
6461         *val = tr32(GRC_EEPROM_DATA);
6462         return 0;
6463 }
6464
6465 static int __devinit tg3_nvram_read(struct tg3 *tp,
6466                                     u32 offset, u32 *val)
6467 {
6468         int i, saw_done_clear;
6469
6470         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6471                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6472                 return -EINVAL;
6473         }
6474
6475         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6476                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6477
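        /* Buffered (page-organized) flash parts are not byte addressable:
         * the linear offset is split into a page number (shifted into the
         * page-number field of the NVRAM address) plus the byte offset
         * within that page.  As a rough sketch, assuming a 264-byte page
         * and the page-number field starting at bit 9, linear offset 530
         * would become page 2, byte 2; the real constants are
         * NVRAM_BUFFERED_PAGE_SIZE and NVRAM_BUFFERED_PAGE_POS.
         */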
6478         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6479                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6480                           NVRAM_BUFFERED_PAGE_POS) +
6481                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6482
6483         if (offset > NVRAM_ADDR_MSK)
6484                 return -EINVAL;
6485
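        /* The NVRAM interface is shared with on-chip firmware (e.g. ASF),
         * so request the software arbitration semaphore and wait for our
         * grant bit before issuing the read command.
         */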
6486         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
6487         for (i = 0; i < 1000; i++) {
6488                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
6489                         break;
6490                 udelay(20);
6491         }
6492
6493         tw32(NVRAM_ADDR, offset);
6494         tw32(NVRAM_CMD,
6495              NVRAM_CMD_RD | NVRAM_CMD_GO |
6496              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6497
6498         /* Wait for done bit to clear then set again. */
6499         saw_done_clear = 0;
6500         for (i = 0; i < 1000; i++) {
6501                 udelay(10);
6502                 if (!saw_done_clear &&
6503                     !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
6504                         saw_done_clear = 1;
6505                 else if (saw_done_clear &&
6506                          (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
6507                         break;
6508         }
6509         if (i >= 1000) {
6510                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
6511                 return -EBUSY;
6512         }
6513
6514         *val = swab32(tr32(NVRAM_RDDATA));
6515         tw32(NVRAM_SWARB, 0x20);
6516
6517         return 0;
6518 }
6519
6520 struct subsys_tbl_ent {
6521         u16 subsys_vendor, subsys_devid;
6522         u32 phy_id;
6523 };
6524
6525 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6526         /* Broadcom boards. */
6527         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6528         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6529         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6530         { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
6531         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6532         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6533         { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
6534         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6535         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6536         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
6537         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
6538
6539         /* 3com boards. */
6540         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6541         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6542         { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
6543         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6544         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6545
6546         /* DELL boards. */
6547         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6548         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6549         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6550         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6551
6552         /* Compaq boards. */
6553         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6554         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6555         { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
6556         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6557         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6558
6559         /* IBM boards. */
6560         { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
6561 };
6562
6563 static int __devinit tg3_phy_probe(struct tg3 *tp)
6564 {
6565         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6566         u32 hw_phy_id, hw_phy_id_masked;
6567         enum phy_led_mode eeprom_led_mode;
6568         u32 val;
6569         int i, eeprom_signature_found, err;
6570
6571         tp->phy_id = PHY_ID_INVALID;
6572         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6573                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6574                      tp->pdev->subsystem_vendor) &&
6575                     (subsys_id_to_phy_id[i].subsys_devid ==
6576                      tp->pdev->subsystem_device)) {
6577                         tp->phy_id = subsys_id_to_phy_id[i].phy_id;
6578                         break;
6579                 }
6580         }
6581
6582         eeprom_phy_id = PHY_ID_INVALID;
6583         eeprom_led_mode = led_mode_auto;
6584         eeprom_signature_found = 0;
6585         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6586         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6587                 u32 nic_cfg;
6588
6589                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6590                 tp->nic_sram_data_cfg = nic_cfg;
6591
6592                 eeprom_signature_found = 1;
6593
6594                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
6595                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
6596                         eeprom_phy_id = PHY_ID_SERDES;
6597                 } else {
6598                         u32 nic_phy_id;
6599
6600                         tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
6601                         if (nic_phy_id != 0) {
6602                                 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
6603                                 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
6604
6605                                 eeprom_phy_id  = (id1 >> 16) << 10;
6606                                 eeprom_phy_id |= (id2 & 0xfc00) << 16;
6607                                 eeprom_phy_id |= (id2 & 0x03ff) <<  0;
6608                         }
6609                 }
6610
6611                 switch (nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK) {
6612                 case NIC_SRAM_DATA_CFG_LED_TRIPLE_SPD:
6613                         eeprom_led_mode = led_mode_three_link;
6614                         break;
6615
6616                 case NIC_SRAM_DATA_CFG_LED_LINK_SPD:
6617                         eeprom_led_mode = led_mode_link10;
6618                         break;
6619
6620                 default:
6621                         eeprom_led_mode = led_mode_auto;
6622                         break;
6623                 };
6624
6625                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
6626                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
6627                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
6628                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
6629                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
6630
6631                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
6632                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6633                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
6634                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
6635         }
6636
6637         /* Reading the PHY ID register can conflict with ASF
6638          * firmware access to the PHY hardware.
6639          */
6640         err = 0;
6641         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6642                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
6643         } else {
6644                 /* Now read the physical PHY_ID from the chip and verify
6645                  * that it is sane.  If it doesn't look good, we fall back
6646                  * to the hard-coded, table-based PHY_ID and, failing
6647                  * that, to the value found in the eeprom area.
6648                  */
6649                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
6650                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
6651
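                /* Repack the two 16-bit MII ID registers (OUI bits plus
                 * model/revision) into the driver's internal PHY_ID layout
                 * so the result can be compared against the PHY_ID_* values.
                 */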
6652                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
6653                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
6654                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
6655
6656                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
6657         }
6658
6659         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
6660                 tp->phy_id = hw_phy_id;
6661         } else {
6662                 /* phy_id currently holds the value found in the
6663                  * subsys_id_to_phy_id[] table or PHY_ID_INVALID
6664                  * if a match was not found there.
6665                  */
6666                 if (tp->phy_id == PHY_ID_INVALID) {
6667                         if (!eeprom_signature_found ||
6668                             !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
6669                                 return -ENODEV;
6670                         tp->phy_id = eeprom_phy_id;
6671                 }
6672         }
6673
6674         if (tp->phy_id != PHY_ID_SERDES &&
6675             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6676                 u32 bmsr, adv_reg, tg3_ctrl;
6677
6678                 tg3_readphy(tp, MII_BMSR, &bmsr);
6679                 tg3_readphy(tp, MII_BMSR, &bmsr);
6680
6681                 if ((bmsr & BMSR_LSTATUS) &&
6682                     !(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6683                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
6684                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705))
6685                         goto skip_phy_reset;
6686
6687                 err = tg3_phy_reset(tp);
6688                 if (err)
6689                         return err;
6690
6691                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
6692                            ADVERTISE_100HALF | ADVERTISE_100FULL |
6693                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
6694                 tg3_ctrl = 0;
6695                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
6696                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
6697                                     MII_TG3_CTRL_ADV_1000_FULL);
6698                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
6699                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
6700                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
6701                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
6702                 }
6703
6704                 if (!tg3_copper_is_advertising_all(tp)) {
6705                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
6706
6707                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6708                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
6709
6710                         tg3_writephy(tp, MII_BMCR,
6711                                      BMCR_ANENABLE | BMCR_ANRESTART);
6712                 }
6713                 tg3_phy_set_wirespeed(tp);
6714
6715                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
6716                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6717                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
6718         }
6719
6720 skip_phy_reset:
6721         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
6722                 err = tg3_init_5401phy_dsp(tp);
6723                 if (err)
6724                         return err;
6725         }
6726
6727         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
6728                 err = tg3_init_5401phy_dsp(tp);
6729         }
6730
6731         /* Determine the PHY led mode. */
6732         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) {
6733                 tp->led_mode = led_mode_link10;
6734         } else {
6735                 tp->led_mode = led_mode_three_link;
6736                 if (eeprom_signature_found &&
6737                     eeprom_led_mode != led_mode_auto)
6738                         tp->led_mode = eeprom_led_mode;
6739         }
6740
6741         if (tp->phy_id == PHY_ID_SERDES)
6742                 tp->link_config.advertising =
6743                         (ADVERTISED_1000baseT_Half |
6744                          ADVERTISED_1000baseT_Full |
6745                          ADVERTISED_Autoneg |
6746                          ADVERTISED_FIBRE);
6747         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
6748                 tp->link_config.advertising &=
6749                         ~(ADVERTISED_1000baseT_Half |
6750                           ADVERTISED_1000baseT_Full);
6751
6752         return err;
6753 }
6754
6755 static void __devinit tg3_read_partno(struct tg3 *tp)
6756 {
6757         unsigned char vpd_data[256];
6758         int i;
6759
6760         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6761                 /* Sun decided not to put the necessary bits in the
6762                  * NVRAM of their onboard tg3 parts :(
6763                  */
6764                 strcpy(tp->board_part_number, "Sun 5704");
6765                 return;
6766         }
6767
6768         for (i = 0; i < 256; i += 4) {
6769                 u32 tmp;
6770
6771                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
6772                         goto out_not_found;
6773
6774                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
6775                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
6776                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
6777                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
6778         }
6779
6780         /* Now parse and find the part number. */
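        /* The data is standard PCI VPD: large-resource tags 0x82 (identifier
         * string) and 0x91 (read/write area) are skipped, and 0x90 marks the
         * read-only area we want.  Within that area each entry is a two-
         * character keyword ("PN" = part number), a one-byte length, and
         * then the data itself.
         */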
6781         for (i = 0; i < 256; ) {
6782                 unsigned char val = vpd_data[i];
6783                 int block_end;
6784
6785                 if (val == 0x82 || val == 0x91) {
6786                         i = (i + 3 +
6787                              (vpd_data[i + 1] +
6788                               (vpd_data[i + 2] << 8)));
6789                         continue;
6790                 }
6791
6792                 if (val != 0x90)
6793                         goto out_not_found;
6794
6795                 block_end = (i + 3 +
6796                              (vpd_data[i + 1] +
6797                               (vpd_data[i + 2] << 8)));
6798                 i += 3;
6799                 while (i < block_end) {
6800                         if (vpd_data[i + 0] == 'P' &&
6801                             vpd_data[i + 1] == 'N') {
6802                                 int partno_len = vpd_data[i + 2];
6803
6804                                 if (partno_len > 24)
6805                                         goto out_not_found;
6806
6807                                 memcpy(tp->board_part_number,
6808                                        &vpd_data[i + 3],
6809                                        partno_len);
6810
6811                                 /* Success. */
6812                                 return;
6813                         }
                        i += 3 + vpd_data[i + 2];       /* skip to the next keyword */
6814                 }
6815
6816                 /* Part number not found. */
6817                 goto out_not_found;
6818         }
6819
6820 out_not_found:
6821         strcpy(tp->board_part_number, "none");
6822 }
6823
6824 #ifdef CONFIG_SPARC64
6825 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
6826 {
6827         struct pci_dev *pdev = tp->pdev;
6828         struct pcidev_cookie *pcp = pdev->sysdata;
6829
6830         if (pcp != NULL) {
6831                 int node = pcp->prom_node;
6832                 u32 venid, devid;
6833                 int err;
6834
6835                 err = prom_getproperty(node, "subsystem-vendor-id",
6836                                        (char *) &venid, sizeof(venid));
6837                 if (err == 0 || err == -1)
6838                         return 0;
6839                 err = prom_getproperty(node, "subsystem-id",
6840                                        (char *) &devid, sizeof(devid));
6841                 if (err == 0 || err == -1)
6842                         return 0;
6843
6844                 if (venid == PCI_VENDOR_ID_SUN &&
6845                     devid == PCI_DEVICE_ID_TIGON3_5704)
6846                         return 1;
6847         }
6848         return 0;
6849 }
6850 #endif
6851
6852 static int __devinit tg3_get_invariants(struct tg3 *tp)
6853 {
6854         u32 misc_ctrl_reg;
6855         u32 cacheline_sz_reg;
6856         u32 pci_state_reg, grc_misc_cfg;
6857         u32 val;
6858         u16 pci_cmd;
6859         int err;
6860
6861 #ifdef CONFIG_SPARC64
6862         if (tg3_is_sun_5704(tp))
6863                 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
6864 #endif
6865
6866         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
6867          * reordering to the mailbox registers done by the host
6868          * controller can cause major troubles.  We read back from
6869          * every mailbox register write to force the writes to be
6870          * posted to the chip in order.
6871          */
6872         if (pci_find_device(PCI_VENDOR_ID_INTEL,
6873                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
6874             pci_find_device(PCI_VENDOR_ID_INTEL,
6875                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
6876             pci_find_device(PCI_VENDOR_ID_INTEL,
6877                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
6878             pci_find_device(PCI_VENDOR_ID_INTEL,
6879                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
6880             pci_find_device(PCI_VENDOR_ID_AMD,
6881                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
6882                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
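        /* With this flag set, mailbox writes elsewhere in the driver are
         * followed by a read of the same register purely to flush the
         * posted write.  Roughly (a sketch; the real helpers are in tg3.h):
         *
         *      writel(val, mbox);
         *      readl(mbox);
         */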
6883
6884         /* Force memory write invalidate off.  If we leave it on,
6885          * then on 5700_BX chips we have to enable a workaround.
6886          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
6887          * to match the cacheline size.  The Broadcom driver has this
6888          * workaround but turns MWI off at all times, so the workaround
6889          * is never used.  This seems to suggest that it is insufficient.
6890          */
6891         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6892         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
6893         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6894
6895         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
6896          * has the register indirect write enable bit set before
6897          * we try to access any of the MMIO registers.  It is also
6898          * critical that the PCI-X hw workaround situation is decided
6899          * before that as well.
6900          */
6901         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6902                               &misc_ctrl_reg);
6903
6904         tp->pci_chip_rev_id = (misc_ctrl_reg >>
6905                                MISC_HOST_CTRL_CHIPREV_SHIFT);
6906
6907         /* Initialize misc host control in PCI block. */
6908         tp->misc_host_ctrl |= (misc_ctrl_reg &
6909                                MISC_HOST_CTRL_CHIPREV);
6910         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6911                                tp->misc_host_ctrl);
6912
6913         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
6914                               &cacheline_sz_reg);
6915
6916         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
6917         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
6918         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
6919         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
6920
6921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
6922             tp->pci_lat_timer < 64) {
6923                 tp->pci_lat_timer = 64;
6924
6925                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
6926                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
6927                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
6928                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
6929
6930                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
6931                                        cacheline_sz_reg);
6932         }
6933
6934         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
6935                               &pci_state_reg);
6936
6937         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
6938                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
6939
6940                 /* If this is a 5700 BX chipset, and we are in PCI-X
6941                  * mode, enable register write workaround.
6942                  *
6943                  * The workaround is to use indirect register accesses
6944                  * for all chip writes not to mailbox registers.
6945                  */
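                /* Roughly, such an indirect access goes through PCI config
                 * space instead of MMIO: the register offset is written to
                 * TG3PCI_REG_BASE_ADDR and the value to TG3PCI_REG_DATA
                 * (see tg3_write_indirect_reg32() earlier in this file).
                 */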
6946                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
6947                         u32 pm_reg;
6948                         u16 pci_cmd;
6949
6950                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
6951
6952                         /* The chip can have its power management PCI config
6953                          * space registers clobbered due to this bug.
6954                          * So explicitly force the chip into D0 here.
6955                          */
6956                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
6957                                               &pm_reg);
6958                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
6959                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
6960                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
6961                                                pm_reg);
6962
6963                         /* Also, force SERR#/PERR# in PCI command. */
6964                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6965                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
6966                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6967                 }
6968         }
6969
6970         /* Back to back register writes can cause problems on this chip,
6971          * the workaround is to read back all reg writes except those to
6972          * mailbox regs.  See tg3_write_indirect_reg32().
6973          */
6974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
6975                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
6976
6977         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
6978                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
6979         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
6980                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
6981
6982         /* Chip-specific fixup from Broadcom driver */
6983         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
6984             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
6985                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
6986                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
6987         }
6988
6989         /* Force the chip into D0. */
6990         err = tg3_set_power_state(tp, 0);
6991         if (err) {
6992                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
6993                        pci_name(tp->pdev));
6994                 return err;
6995         }
6996
6997         /* 5700 B0 chips do not support checksumming correctly due
6998          * to hardware bugs.
6999          */
7000         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
7001                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
7002
7003         /* Pseudo-header checksum is done by hardware logic and not
7004          * the offload processors, so make the chip do the pseudo-
7005          * header checksums on receive.  For transmit it is more
7006          * convenient to do the pseudo-header checksum in software
7007          * as Linux does that on transmit for us in all cases.
7008          */
7009         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
7010         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
7011
7012         /* Derive initial jumbo mode from MTU assigned in
7013          * ether_setup() via the alloc_etherdev() call
7014          */
7015         if (tp->dev->mtu > ETH_DATA_LEN)
7016                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
7017
7018         /* Determine WakeOnLan speed to use. */
7019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7020             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7021             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
7022             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
7023                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
7024         } else {
7025                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
7026         }
7027
7028         /* A few boards don't want Ethernet@WireSpeed phy feature */
7029         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
7030             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
7031              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
7032              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
7033                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
7034
7035         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
7036             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
7037                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
7038         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
7039                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
7040
7041         /* Note: 5750 also needs this flag set to improve bit error rate. */
7042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
7043                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
7044
7045         /* Only 5701 and later support tagged irq status mode.
7046          * Also, 5788 chips cannot use tagged irq status.
7047          *
7048          * However, since we are using NAPI avoid tagged irq status
7049          * because the interrupt condition is more difficult to
7050          * fully clear in that mode.
7051          */
7052         tp->coalesce_mode = 0;
7053
7054         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
7055             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
7056                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
7057
7058         /* Initialize MAC MI mode, polling disabled. */
7059         tw32_f(MAC_MI_MODE, tp->mi_mode);
7060         udelay(40);
7061
7062         /* Initialize data/descriptor byte/word swapping. */
7063         val = tr32(GRC_MODE);
7064         val &= GRC_MODE_HOST_STACKUP;
7065         tw32(GRC_MODE, val | tp->grc_mode);
7066
7067         tg3_switch_clocks(tp);
7068
7069         /* Clear this out for sanity. */
7070         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7071
7072         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7073                               &pci_state_reg);
7074         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
7075             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
7076                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
7077
7078                 if (chiprevid == CHIPREV_ID_5701_A0 ||
7079                     chiprevid == CHIPREV_ID_5701_B0 ||
7080                     chiprevid == CHIPREV_ID_5701_B2 ||
7081                     chiprevid == CHIPREV_ID_5701_B5) {
7082                         unsigned long sram_base;
7083
7084                         /* Write some dummy words into the SRAM status block
7085                          * area, see if it reads back correctly.  If the return
7086                          * value is bad, force enable the PCIX workaround.
7087                          */
7088                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
7089
7090                         writel(0x00000000, sram_base);
7091                         writel(0x00000000, sram_base + 4);
7092                         writel(0xffffffff, sram_base + 4);
7093                         if (readl(sram_base) != 0x00000000)
7094                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7095                 }
7096         }
7097
7098         udelay(50);
7099         tg3_nvram_init(tp);
7100
7101         /* Determine if TX descriptors will reside in
7102          * main memory or in the chip SRAM.
7103          */
7104         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
7105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
7106                 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
7107
7108         grc_misc_cfg = tr32(GRC_MISC_CFG);
7109         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
7110
7111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7112             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
7113                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
7114                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
7115         }
7116
7117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7118             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
7119              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
7120                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
7121
7122         /* these are limited to 10/100 only */
7123         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7124              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7125             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7126              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7127              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7128               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7129               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)))
7130                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7131
7132         err = tg3_phy_probe(tp);
7133         if (err) {
7134                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7135                        pci_name(tp->pdev), err);
7136                 /* ... but do not return immediately ... */
7137         }
7138
7139         tg3_read_partno(tp);
7140
7141         if (tp->phy_id == PHY_ID_SERDES) {
7142                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7143
7144                 /* And override led_mode in case Dell ever makes
7145                  * a fibre board.
7146                  */
7147                 tp->led_mode = led_mode_three_link;
7148         } else {
7149                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7150                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7151                 else
7152                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7153         }
7154
7155         /* 5700 {AX,BX} chips have a broken status block link
7156          * change bit implementation, so we must use the
7157          * status register in those cases.
7158          */
7159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7160                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7161         else
7162                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7163
7164         /* The led_mode is set during tg3_phy_probe, here we might
7165          * have to force the link status polling mechanism based
7166          * upon subsystem IDs.
7167          */
7168         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7169             tp->phy_id != PHY_ID_SERDES) {
7170                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7171                                   TG3_FLAG_USE_LINKCHG_REG);
7172         }
7173
7174         /* For all SERDES we poll the MAC status register. */
7175         if (tp->phy_id == PHY_ID_SERDES)
7176                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7177         else
7178                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7179
7180         /* 5700 BX chips need to have their TX producer index mailboxes
7181          * written twice to workaround a bug.
7182          */
7183         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7184                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7185         else
7186                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7187
7188         /* 5700 chips can get confused if TX buffers straddle the
7189          * 4GB address boundary in some cases.
7190          */
7191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7192                 tp->dev->hard_start_xmit = tg3_start_xmit_4gbug;
7193         else
7194                 tp->dev->hard_start_xmit = tg3_start_xmit;
7195
7196         tp->rx_offset = 2;
7197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7198             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7199                 tp->rx_offset = 0;
7200
7201         /* By default, disable wake-on-lan.  User can change this
7202          * using ETHTOOL_SWOL.
7203          */
7204         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7205
7206         return err;
7207 }
7208
7209 #ifdef CONFIG_SPARC64
7210 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7211 {
7212         struct net_device *dev = tp->dev;
7213         struct pci_dev *pdev = tp->pdev;
7214         struct pcidev_cookie *pcp = pdev->sysdata;
7215
7216         if (pcp != NULL) {
7217                 int node = pcp->prom_node;
7218
7219                 if (prom_getproplen(node, "local-mac-address") == 6) {
7220                         prom_getproperty(node, "local-mac-address",
7221                                          dev->dev_addr, 6);
7222                         return 0;
7223                 }
7224         }
7225         return -ENODEV;
7226 }
7227
7228 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7229 {
7230         struct net_device *dev = tp->dev;
7231
7232         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7233         return 0;
7234 }
7235 #endif
7236
7237 static int __devinit tg3_get_device_address(struct tg3 *tp)
7238 {
7239         struct net_device *dev = tp->dev;
7240         u32 hi, lo, mac_offset;
7241
7242 #ifdef CONFIG_SPARC64
7243         if (!tg3_get_macaddr_sparc(tp))
7244                 return 0;
7245 #endif
7246
7247         if (PCI_FUNC(tp->pdev->devfn) == 0)
7248                 mac_offset = 0x7c;
7249         else
7250                 mac_offset = 0xcc;
7251
7252         /* First try to get it from MAC address mailbox. */
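        /* The mailbox contents are considered valid only when the upper
         * 16 bits of the high word hold the ASCII signature "HK" (0x484b),
         * as checked below.
         */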
7253         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
7254         if ((hi >> 16) == 0x484b) {
7255                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7256                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7257
7258                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7259                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7260                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7261                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7262                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7263         }
7264         /* Next, try NVRAM. */
7265         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704) &&
7266                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7267                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7268                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7269                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7270                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7271                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7272                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7273                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7274         }
7275         /* Finally just fetch it out of the MAC control regs. */
7276         else {
7277                 hi = tr32(MAC_ADDR_0_HIGH);
7278                 lo = tr32(MAC_ADDR_0_LOW);
7279
7280                 dev->dev_addr[5] = lo & 0xff;
7281                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7282                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7283                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7284                 dev->dev_addr[1] = hi & 0xff;
7285                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7286         }
7287
7288         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7289 #ifdef CONFIG_SPARC64
7290                 if (!tg3_get_default_macaddr_sparc(tp))
7291                         return 0;
7292 #endif
7293                 return -EINVAL;
7294         }
7295         return 0;
7296 }
7297
7298 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7299 {
7300         struct tg3_internal_buffer_desc test_desc;
7301         u32 sram_dma_descs;
7302         int i, ret;
7303
7304         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7305
7306         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7307         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7308         tw32(RDMAC_STATUS, 0);
7309         tw32(WDMAC_STATUS, 0);
7310
7311         tw32(BUFMGR_MODE, 0);
7312         tw32(FTQ_RESET, 0);
7313
7314         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7315         test_desc.addr_lo = buf_dma & 0xffffffff;
7316         test_desc.nic_mbuf = 0x00002100;
7317         test_desc.len = size;
7318
7319         /*
7320          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7321          * the *second* time the tg3 driver was loaded after an
7322          * initial scan.
7323          *
7324          * Broadcom tells me:
7325          *   ...the DMA engine is connected to the GRC block and a DMA
7326          *   reset may affect the GRC block in some unpredictable way...
7327          *   The behavior of resets to individual blocks has not been tested.
7328          *
7329          * Broadcom noted the GRC reset will also reset all sub-components.
7330          */
7331         if (to_device) {
7332                 test_desc.cqid_sqid = (13 << 8) | 2;
7333
7334                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
7335                 udelay(40);
7336         } else {
7337                 test_desc.cqid_sqid = (16 << 8) | 7;
7338
7339                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
7340                 udelay(40);
7341         }
7342         test_desc.flags = 0x00000005;
7343
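        /* Copy the test descriptor into NIC SRAM one 32-bit word at a time
         * through the PCI config-space memory window: MEM_WIN_BASE_ADDR
         * selects the SRAM address, MEM_WIN_DATA carries the data.
         */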
7344         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7345                 u32 val;
7346
7347                 val = *(((u32 *)&test_desc) + i);
7348                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7349                                        sram_dma_descs + (i * sizeof(u32)));
7350                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7351         }
7352         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7353
7354         if (to_device) {
7355                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7356         } else {
7357                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7358         }
7359
7360         ret = -ENODEV;
7361         for (i = 0; i < 40; i++) {
7362                 u32 val;
7363
7364                 if (to_device)
7365                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7366                 else
7367                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7368                 if ((val & 0xffff) == sram_dma_descs) {
7369                         ret = 0;
7370                         break;
7371                 }
7372
7373                 udelay(100);
7374         }
7375
7376         return ret;
7377 }
7378
7379 #define TEST_BUFFER_SIZE        0x400
7380
7381 static int __devinit tg3_test_dma(struct tg3 *tp)
7382 {
7383         dma_addr_t buf_dma;
7384         u32 *buf;
7385         int ret;
7386
7387         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7388         if (!buf) {
7389                 ret = -ENOMEM;
7390                 goto out_nofree;
7391         }
7392
7393         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) == 0) {
7394                 tp->dma_rwctrl =
7395                         (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7396                         (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
7397                         (0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
7398                         (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
7399                         (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
7400                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
7401                         tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA
7402                                             << DMA_RWCTRL_MIN_DMA_SHIFT);
7403         } else {
7404                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7405                         tp->dma_rwctrl =
7406                                 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7407                                 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
7408                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
7409                                 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
7410                                 (0x00 << DMA_RWCTRL_MIN_DMA_SHIFT);
7411                 else
7412                         tp->dma_rwctrl =
7413                                 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7414                                 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
7415                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
7416                                 (0x3 << DMA_RWCTRL_READ_WATER_SHIFT) |
7417                                 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
7418
7419                 /* Wheee, some more chip bugs... */
7420                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7421                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7422                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7423
7424                         if (ccval == 0x6 || ccval == 0x7)
7425                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7426                 }
7427         }
7428
7429         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7430             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7431                 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA
7432                                     << DMA_RWCTRL_MIN_DMA_SHIFT);
7433
7434         /* We don't do this on x86 because it seems to hurt performance.
7435          * It does help things on other platforms though.
7436          */
7437 #ifndef CONFIG_X86
7438         {
7439                 u8 byte;
7440                 int cacheline_size;
7441                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7442
7443                 if (byte == 0)
7444                         cacheline_size = 1024;
7445                 else
7446                         cacheline_size = (int) byte * 4;
7447
7448                 tp->dma_rwctrl &= ~(DMA_RWCTRL_READ_BNDRY_MASK |
7449                                     DMA_RWCTRL_WRITE_BNDRY_MASK);
7450
7451                 switch (cacheline_size) {
7452                 case 16:
7453                         tp->dma_rwctrl |=
7454                                 (DMA_RWCTRL_READ_BNDRY_16 |
7455                                  DMA_RWCTRL_WRITE_BNDRY_16);
7456                         break;
7457
7458                 case 32:
7459                         tp->dma_rwctrl |=
7460                                 (DMA_RWCTRL_READ_BNDRY_32 |
7461                                  DMA_RWCTRL_WRITE_BNDRY_32);
7462                         break;
7463
7464                 case 64:
7465                         tp->dma_rwctrl |=
7466                                 (DMA_RWCTRL_READ_BNDRY_64 |
7467                                  DMA_RWCTRL_WRITE_BNDRY_64);
7468                         break;
7469
7470                 case 128:
7471                         tp->dma_rwctrl |=
7472                                 (DMA_RWCTRL_READ_BNDRY_128 |
7473                                  DMA_RWCTRL_WRITE_BNDRY_128);
7474                         break;
7475
7476                 case 256:
7477                         tp->dma_rwctrl |=
7478                                 (DMA_RWCTRL_READ_BNDRY_256 |
7479                                  DMA_RWCTRL_WRITE_BNDRY_256);
7480                         break;
7481
7482                 case 512:
7483                         tp->dma_rwctrl |=
7484                                 (DMA_RWCTRL_READ_BNDRY_512 |
7485                                  DMA_RWCTRL_WRITE_BNDRY_512);
7486                         break;
7487
7488                 case 1024:
7489                         tp->dma_rwctrl |=
7490                                 (DMA_RWCTRL_READ_BNDRY_1024 |
7491                                  DMA_RWCTRL_WRITE_BNDRY_1024);
7492                         break;
7493                 };
7494         }
7495 #endif
7496
7497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7498             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7499                 /* Remove this if it causes problems for some boards. */
7500                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7501
7502                 /* On 5700/5701 chips, we need to set this bit.
7503                  * Otherwise the chip will issue cacheline transactions
7504                  * to streamable DMA memory without all the byte
7505                  * enables turned on.  This is an error on several
7506                  * RISC PCI controllers, in particular sparc64.
7507                  *
7508                  * On 5703/5704 chips, this bit has been reassigned
7509                  * a different meaning.  In particular, it is used
7510                  * on those chips to enable a PCI-X workaround.
7511                  */
7512                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7513         }
7514
7515         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7516
7517 #if 0
7518         /* Unneeded, already done by tg3_get_invariants.  */
7519         tg3_switch_clocks(tp);
7520 #endif
7521
7522         ret = 0;
7523         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7524             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7525                 goto out;
7526
7527         while (1) {
7528                 u32 *p = buf, i;
7529
7530                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7531                         p[i] = i;
7532
7533                 /* Send the buffer to the chip. */
7534                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7535                 if (ret) {
7536                         printk(KERN_ERR "tg3_test_dma(): write of test buffer failed, err %d\n", ret);
7537                         break;
7538                 }
7539
7540 #if 0
7541                 /* validate data reached card RAM correctly. */
7542                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7543                         u32 val;
7544                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7545                         if (le32_to_cpu(val) != p[i]) {
7546                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
7547                                 /* ret = -ENODEV here? */
7548                         }
7549                         p[i] = 0;
7550                 }
7551 #endif
7552                 /* Now read it back. */
7553                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7554                 if (ret) {
7555                         printk(KERN_ERR "tg3_test_dma(): read of test buffer failed, err %d\n", ret);
7556
7557                         break;
7558                 }
7559
7560                 /* Verify it. */
7561                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7562                         if (p[i] == i)
7563                                 continue;
7564
7565                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7566                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7567                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7568                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7569                                 break;
7570                         } else {
7571                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
7572                                 ret = -ENODEV;
7573                                 goto out;
7574                         }
7575                 }
7576
7577                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
7578                         /* Success. */
7579                         ret = 0;
7580                         break;
7581                 }
7582         }
7583
7584 out:
7585         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
7586 out_nofree:
7587         return ret;
7588 }
7589
7590 static void __devinit tg3_init_link_config(struct tg3 *tp)
7591 {
7592         tp->link_config.advertising =
7593                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
7594                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
7595                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
7596                  ADVERTISED_Autoneg | ADVERTISED_MII);
7597         tp->link_config.speed = SPEED_INVALID;
7598         tp->link_config.duplex = DUPLEX_INVALID;
7599         tp->link_config.autoneg = AUTONEG_ENABLE;
7600         netif_carrier_off(tp->dev);
7601         tp->link_config.active_speed = SPEED_INVALID;
7602         tp->link_config.active_duplex = DUPLEX_INVALID;
7603         tp->link_config.phy_is_low_power = 0;
7604         tp->link_config.orig_speed = SPEED_INVALID;
7605         tp->link_config.orig_duplex = DUPLEX_INVALID;
7606         tp->link_config.orig_autoneg = AUTONEG_INVALID;
7607 }
7608
7609 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
7610 {
7611         tp->bufmgr_config.mbuf_read_dma_low_water =
7612                 DEFAULT_MB_RDMA_LOW_WATER;
7613         tp->bufmgr_config.mbuf_mac_rx_low_water =
7614                 DEFAULT_MB_MACRX_LOW_WATER;
7615         tp->bufmgr_config.mbuf_high_water =
7616                 DEFAULT_MB_HIGH_WATER;
7617
7618         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
7619                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
7620         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
7621                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
7622         tp->bufmgr_config.mbuf_high_water_jumbo =
7623                 DEFAULT_MB_HIGH_WATER_JUMBO;
7624
7625         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
7626         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
7627 }
7628
7629 static char * __devinit tg3_phy_string(struct tg3 *tp)
7630 {
7631         switch (tp->phy_id & PHY_ID_MASK) {
7632         case PHY_ID_BCM5400:    return "5400";
7633         case PHY_ID_BCM5401:    return "5401";
7634         case PHY_ID_BCM5411:    return "5411";
7635         case PHY_ID_BCM5701:    return "5701";
7636         case PHY_ID_BCM5703:    return "5703";
7637         case PHY_ID_BCM5704:    return "5704";
7638         case PHY_ID_BCM5705:    return "5705";
7639         case PHY_ID_BCM8002:    return "8002";
7640         case PHY_ID_SERDES:     return "serdes";
7641         default:                return "unknown";
7642         };
7643 }
7644
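/* The 5704 is a dual-MAC part; both ports appear as separate PCI functions
 * of the same device.  Walk the functions in this slot to find the sibling
 * of the device currently being probed.
 */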
7645 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
7646 {
7647         struct pci_dev *peer;
7648         unsigned int func, devnr = tp->pdev->devfn & ~7;
7649
7650         for (func = 0; func < 8; func++) {
7651                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
7652                 if (peer && peer != tp->pdev)
7653                         break;
7654                 pci_dev_put(peer);
7655         }
7656         if (!peer || peer == tp->pdev)
7657                 BUG();
7658
7659         /*
7660          * We don't need to keep the refcount elevated; there's no way
7661          * to remove one half of this device without removing the other
7662          */
7663         pci_dev_put(peer);
7664
7665         return peer;
7666 }
7667
7668 static int __devinit tg3_init_one(struct pci_dev *pdev,
7669                                   const struct pci_device_id *ent)
7670 {
7671         static int tg3_version_printed = 0;
7672         unsigned long tg3reg_base, tg3reg_len;
7673         struct net_device *dev;
7674         struct tg3 *tp;
7675         int i, err, pci_using_dac, pm_cap;
7676
7677         if (tg3_version_printed++ == 0)
7678                 printk(KERN_INFO "%s", version);
7679
7680         err = pci_enable_device(pdev);
7681         if (err) {
7682                 printk(KERN_ERR PFX "Cannot enable PCI device, "
7683                        "aborting.\n");
7684                 return err;
7685         }
7686
7687         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7688                 printk(KERN_ERR PFX "Cannot find proper PCI device "
7689                        "base address, aborting.\n");
7690                 err = -ENODEV;
7691                 goto err_out_disable_pdev;
7692         }
7693
7694         err = pci_request_regions(pdev, DRV_MODULE_NAME);
7695         if (err) {
7696                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
7697                        "aborting.\n");
7698                 goto err_out_disable_pdev;
7699         }
7700
7701         pci_set_master(pdev);
7702
7703         /* Find power-management capability. */
7704         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7705         if (pm_cap == 0) {
7706                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
7707                        "aborting.\n");
7708                 err = -EIO;
7709                 goto err_out_free_res;
7710         }
7711
7712         /* Configure DMA attributes. */
7713         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
7714         if (!err) {
7715                 pci_using_dac = 1;
7716                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
7717                 if (err < 0) {
7718                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
7719                                "for consistent allocations\n");
7720                         goto err_out_free_res;
7721                 }
7722         } else {
7723                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
7724                 if (err) {
7725                         printk(KERN_ERR PFX "No usable DMA configuration, "
7726                                "aborting.\n");
7727                         goto err_out_free_res;
7728                 }
7729                 pci_using_dac = 0;
7730         }
7731
7732         tg3reg_base = pci_resource_start(pdev, 0);
7733         tg3reg_len = pci_resource_len(pdev, 0);
7734
7735         dev = alloc_etherdev(sizeof(*tp));
7736         if (!dev) {
7737                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
7738                 err = -ENOMEM;
7739                 goto err_out_free_res;
7740         }
7741
7742         SET_MODULE_OWNER(dev);
7743         SET_NETDEV_DEV(dev, &pdev->dev);
7744
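        /* With a 64-bit DMA mask the chip can reach buffers in high memory,
         * so advertise NETIF_F_HIGHDMA to the stack.
         */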
7745         if (pci_using_dac)
7746                 dev->features |= NETIF_F_HIGHDMA;
7747 #if TG3_VLAN_TAG_USED
7748         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7749         dev->vlan_rx_register = tg3_vlan_rx_register;
7750         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
7751 #endif
7752
7753         tp = dev->priv;
7754         tp->pdev = pdev;
7755         tp->dev = dev;
7756         tp->pm_cap = pm_cap;
7757         tp->mac_mode = TG3_DEF_MAC_MODE;
7758         tp->rx_mode = TG3_DEF_RX_MODE;
7759         tp->tx_mode = TG3_DEF_TX_MODE;
7760         tp->mi_mode = MAC_MI_MODE_BASE;
7761         if (tg3_debug > 0)
7762                 tp->msg_enable = tg3_debug;
7763         else
7764                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
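        /* If tg3_debug is exposed as a module parameter (as is usual for a
         * mask like this), loading with e.g. "modprobe tg3 tg3_debug=0x7fff"
         * would override the default message mask chosen above.
         */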
7765
7766         /* The word/byte swap controls here affect register access byte
7767          * swapping only.  DMA data byte swapping is controlled by the
7768          * GRC_MODE setting below.
7769          */
7770         tp->misc_host_ctrl =
7771                 MISC_HOST_CTRL_MASK_PCI_INT |
7772                 MISC_HOST_CTRL_WORD_SWAP |
7773                 MISC_HOST_CTRL_INDIR_ACCESS |
7774                 MISC_HOST_CTRL_PCISTATE_RW;
7775
7776         /* The NONFRM (non-frame) byte/word swap controls take effect
7777          * on descriptor entries, i.e. anything which isn't packet data.
7778          *
7779          * The StrongARM chips on the board (one for tx, one for rx)
7780          * are running in big-endian mode.
7781          */
7782         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
7783                         GRC_MODE_WSWAP_NONFRM_DATA);
7784 #ifdef __BIG_ENDIAN
7785         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
7786 #endif
7787         spin_lock_init(&tp->lock);
7788         spin_lock_init(&tp->tx_lock);
7789         spin_lock_init(&tp->indirect_lock);
7790         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
7791
7792         tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
7793         if (tp->regs == 0UL) {
7794                 printk(KERN_ERR PFX "Cannot map device registers, "
7795                        "aborting.\n");
7796                 err = -ENOMEM;
7797                 goto err_out_free_dev;
7798         }
7799
7800         tg3_init_link_config(tp);
7801
7802         tg3_init_bufmgr_config(tp);
7803
7804         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
7805         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
7806         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
7807
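        /* Wire up the net_device entry points.  dev->poll with a weight of
         * 64 selects NAPI receive processing; the watchdog calls
         * tg3_tx_timeout() if the TX queue stalls for TG3_TX_TIMEOUT.
         */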
7808         dev->open = tg3_open;
7809         dev->stop = tg3_close;
7810         dev->get_stats = tg3_get_stats;
7811         dev->set_multicast_list = tg3_set_rx_mode;
7812         dev->set_mac_address = tg3_set_mac_addr;
7813         dev->do_ioctl = tg3_ioctl;
7814         dev->tx_timeout = tg3_tx_timeout;
7815         dev->poll = tg3_poll;
7816         dev->ethtool_ops = &tg3_ethtool_ops;
7817         dev->weight = 64;
7818         dev->watchdog_timeo = TG3_TX_TIMEOUT;
7819         dev->change_mtu = tg3_change_mtu;
7820         dev->irq = pdev->irq;
7821 #ifdef CONFIG_NET_POLL_CONTROLLER
7822         dev->poll_controller = tg3_poll_controller;
7823 #endif
7824
7825         err = tg3_get_invariants(tp);
7826         if (err) {
7827                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
7828                        "aborting.\n");
7829                 goto err_out_iounmap;
7830         }
7831
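        /* The 5705 family uses its own default buffer manager watermarks,
         * so override the generic values set in tg3_init_bufmgr_config().
         */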
7832         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7833                 tp->bufmgr_config.mbuf_read_dma_low_water =
7834                         DEFAULT_MB_RDMA_LOW_WATER_5705;
7835                 tp->bufmgr_config.mbuf_mac_rx_low_water =
7836                         DEFAULT_MB_MACRX_LOW_WATER_5705;
7837                 tp->bufmgr_config.mbuf_high_water =
7838                         DEFAULT_MB_HIGH_WATER_5705;
7839         }
7840
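        /* Work out whether this chip can do TSO: 5700/5701 parts, 5705 A0,
         * ASF-enabled boards and 5788 variants cannot, so clear the
         * capability flag for them.
         */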
7841 #if TG3_TSO_SUPPORT != 0
7842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7843             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7844             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
7845             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
7846             (tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7847                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7848         } else {
7849                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7850         }
7851
7852         /* TSO is off by default; the user can enable it via ethtool.  */
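        /* For example, assuming the interface came up as eth0:
         *
         *   ethtool -K eth0 tso on
         */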
7853 #if 0
7854         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
7855                 dev->features |= NETIF_F_TSO;
7856 #endif
7857
7858 #endif
7859
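        /* A 5705 A1 without TSO sitting on a low-speed bus is limited to
         * 64 pending RX descriptors.
         */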
7860         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
7861             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7862             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
7863                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
7864                 tp->rx_pending = 63;
7865         }
7866
7867         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7868                 tp->pdev_peer = tg3_find_5704_peer(tp);
7869
7870         err = tg3_get_device_address(tp);
7871         if (err) {
7872                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
7873                        "aborting.\n");
7874                 goto err_out_iounmap;
7875         }
7876
7877         /*
7878          * Reset the chip in case a UNDI or EFI driver did not shut it down
7879          * cleanly.  Otherwise the DMA self test will enable WDMAC and we'll
7880          * see (spurious) pending DMA on the PCI bus at that point.
7881          */
7882         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
7883             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7884                 pci_save_state(tp->pdev, tp->pci_cfg_state);
7885                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
7886                 tg3_halt(tp);
7887         }
7888
7889         err = tg3_test_dma(tp);
7890         if (err) {
7891                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
7892                 goto err_out_iounmap;
7893         }
7894
7895         /* Tigon3 can only offload checksums for IPv4, and some chips
7896          * have buggy checksumming hardware.
7897          */
7898         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
7899                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7900                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7901         } else
7902                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7903
7904         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
7905                 dev->features &= ~NETIF_F_HIGHDMA;
7906
7907         err = register_netdev(dev);
7908         if (err) {
7909                 printk(KERN_ERR PFX "Cannot register net device, "
7910                        "aborting.\n");
7911                 goto err_out_iounmap;
7912         }
7913
7914         pci_set_drvdata(pdev, dev);
7915
7916         /* Now that we have fully set up the chip, save away a snapshot
7917          * of the PCI config space.  We need to restore this after
7918          * GRC_MISC_CFG core clock resets and some resume events.
7919          */
7920         pci_save_state(tp->pdev, tp->pci_cfg_state);
7921
7922         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
7923                dev->name,
7924                tp->board_part_number,
7925                tp->pci_chip_rev_id,
7926                tg3_phy_string(tp),
7927                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
7928                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
7929                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
7930                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
7931                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
7932                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
7933
7934         for (i = 0; i < 6; i++)
7935                 printk("%2.2x%c", dev->dev_addr[i],
7936                        i == 5 ? '\n' : ':');
7937
7938         printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
7939                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
7940                "TSOcap[%d]\n",
7941                dev->name,
7942                (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
7943                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
7944                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
7945                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
7946                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
7947                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
7948                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
7949                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
7950
7951         return 0;
7952
7953 err_out_iounmap:
7954         iounmap((void *) tp->regs);
7955
7956 err_out_free_dev:
7957         free_netdev(dev);
7958
7959 err_out_free_res:
7960         pci_release_regions(pdev);
7961
7962 err_out_disable_pdev:
7963         pci_disable_device(pdev);
7964         pci_set_drvdata(pdev, NULL);
7965         return err;
7966 }
7967
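/* PCI remove entry point: tear the device down in the reverse order of
 * tg3_init_one().
 */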
7968 static void __devexit tg3_remove_one(struct pci_dev *pdev)
7969 {
7970         struct net_device *dev = pci_get_drvdata(pdev);
7971
7972         if (dev) {
7973                 unregister_netdev(dev);
7974                 iounmap((void *) ((struct tg3 *)(dev->priv))->regs);
7975                 free_netdev(dev);
7976                 pci_release_regions(pdev);
7977                 pci_disable_device(pdev);
7978                 pci_set_drvdata(pdev, NULL);
7979         }
7980 }
7981
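/* Suspend: quiesce NAPI and the timer, mask interrupts, halt the chip and
 * enter the requested power state.  If the power transition fails, the
 * device is re-initialized and brought back up.
 */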
7982 static int tg3_suspend(struct pci_dev *pdev, u32 state)
7983 {
7984         struct net_device *dev = pci_get_drvdata(pdev);
7985         struct tg3 *tp = dev->priv;
7986         int err;
7987
7988         if (!netif_running(dev))
7989                 return 0;
7990
7991         tg3_netif_stop(tp);
7992
7993         del_timer_sync(&tp->timer);
7994
7995         spin_lock_irq(&tp->lock);
7996         spin_lock(&tp->tx_lock);
7997         tg3_disable_ints(tp);
7998         spin_unlock(&tp->tx_lock);
7999         spin_unlock_irq(&tp->lock);
8000
8001         netif_device_detach(dev);
8002
8003         spin_lock_irq(&tp->lock);
8004         spin_lock(&tp->tx_lock);
8005         tg3_halt(tp);
8006         spin_unlock(&tp->tx_lock);
8007         spin_unlock_irq(&tp->lock);
8008
8009         err = tg3_set_power_state(tp, state);
8010         if (err) {
8011                 spin_lock_irq(&tp->lock);
8012                 spin_lock(&tp->tx_lock);
8013
8014                 tg3_init_hw(tp);
8015
8016                 tp->timer.expires = jiffies + tp->timer_offset;
8017                 add_timer(&tp->timer);
8018
8019                 spin_unlock(&tp->tx_lock);
8020                 spin_unlock_irq(&tp->lock);
8021
8022                 netif_device_attach(dev);
8023                 tg3_netif_start(tp);
8024         }
8025
8026         return err;
8027 }
8028
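/* Resume: restore full power, re-initialize the hardware and restart the
 * timer, interrupts and NAPI processing.
 */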
8029 static int tg3_resume(struct pci_dev *pdev)
8030 {
8031         struct net_device *dev = pci_get_drvdata(pdev);
8032         struct tg3 *tp = dev->priv;
8033         int err;
8034
8035         if (!netif_running(dev))
8036                 return 0;
8037
8038         err = tg3_set_power_state(tp, 0);
8039         if (err)
8040                 return err;
8041
8042         netif_device_attach(dev);
8043
8044         spin_lock_irq(&tp->lock);
8045         spin_lock(&tp->tx_lock);
8046
8047         tg3_init_hw(tp);
8048
8049         tp->timer.expires = jiffies + tp->timer_offset;
8050         add_timer(&tp->timer);
8051
8052         tg3_enable_ints(tp);
8053
8054         spin_unlock(&tp->tx_lock);
8055         spin_unlock_irq(&tp->lock);
8056
8057         tg3_netif_start(tp);
8058
8059         return 0;
8060 }
8061
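/* Glue everything into the PCI core; probe/remove/suspend/resume map to
 * the routines above.
 */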
8062 static struct pci_driver tg3_driver = {
8063         .name           = DRV_MODULE_NAME,
8064         .id_table       = tg3_pci_tbl,
8065         .probe          = tg3_init_one,
8066         .remove         = __devexit_p(tg3_remove_one),
8067         .suspend        = tg3_suspend,
8068         .resume         = tg3_resume
8069 };
8070
8071 static int __init tg3_init(void)
8072 {
8073         return pci_module_init(&tg3_driver);
8074 }
8075
8076 static void __exit tg3_cleanup(void)
8077 {
8078         pci_unregister_driver(&tg3_driver);
8079 }
8080
8081 module_init(tg3_init);
8082 module_exit(tg3_cleanup);