fedora core 6 1.2949 + vserver 2.2.0

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 0e46154..f4bf62c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,17 +4,26 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2005 Broadcom Corporation.
+ *
+ * Firmware is:
+ *     Derived from proprietary unpublished source code,
+ *     Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ *     Permission is hereby granted for the distribution of this firmware
+ *     data in hexadecimal or equivalent format, provided this copyright
+ *     notice is accompanying it.
  */
 
-#include <linux/config.h>
 
 #include <linux/module.h>
-
+#include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/in.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
@@ -27,6 +36,8 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dma-mapping.h>
 
 #include <net/checksum.h>
 
@@ -57,8 +68,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.8"
-#define DRV_MODULE_RELDATE     "July 14, 2004"
+#define DRV_MODULE_VERSION     "3.72"
+#define DRV_MODULE_RELDATE     "January 8, 2007"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -81,8 +92,7 @@
 /* hardware minimum and maximum for a single frame's data payload */
 #define TG3_MIN_MTU                    60
 #define TG3_MAX_MTU(tp)        \
-       ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
-         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
+       ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
 
 /* These numbers seem to be hard coded in the NIC firmware somehow.
  * You can't change the ring sizes, but you can change where you place
  * replace things like '% foo' with '& (foo - 1)'.
  */
 #define TG3_RX_RCB_RING_SIZE(tp)       \
-       ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || \
-         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ? \
-        512 : 1024)
+       ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
 
 #define TG3_TX_RING_SIZE               512
 #define TG3_DEF_TX_RING_PENDING                (TG3_TX_RING_SIZE - 1)
                                   TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES      (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
-#define TX_RING_GAP(TP)        \
-       (TG3_TX_RING_SIZE - (TP)->tx_pending)
-#define TX_BUFFS_AVAIL(TP)                                             \
-       (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
-         (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
-         (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
 #define NEXT_TX(N)             (((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ          (1536 + tp->rx_offset + 64)
 #define RX_JUMBO_PKT_BUF_SZ    (9046 + tp->rx_offset + 64)
 
 /* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH           (TG3_TX_RING_SIZE / 4)
+#define TG3_TX_WAKEUP_THRESH(tp)               ((tp)->tx_pending / 4)
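For readers new to the ring-buffer idiom used throughout this file (NEXT_TX below, the TX_BUFFS_AVAIL macro this hunk removes, and the new per-ring TG3_TX_WAKEUP_THRESH), here is a stand-alone sketch of the arithmetic, assuming a power-of-two ring size; all names in it are illustrative, not part of the driver:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 512                             /* must be a power of two */
#define NEXT_IDX(n) (((n) + 1) & (RING_SIZE - 1)) /* '& (size - 1)' stands in for '% size' */

/* Descriptors still available to the producer, given producer/consumer
 * indices and the number of entries the driver lets itself post
 * ("pending"); equivalent to the TX_BUFFS_AVAIL macro removed above. */
static unsigned int bufs_avail(unsigned int cons, unsigned int prod,
                               unsigned int pending)
{
        unsigned int in_flight = (prod - cons) & (RING_SIZE - 1);

        return pending - in_flight;
}

int main(void)
{
        assert(NEXT_IDX(RING_SIZE - 1) == 0);     /* wraps with no division */
        printf("%u free\n", bufs_avail(5, 510, RING_SIZE - 1));
        return 0;
}

With TG3_TX_WAKEUP_THRESH(tp) now defined as tx_pending / 4, a stopped queue is woken once roughly a quarter of the configured entries are free again, rather than a quarter of the fixed ring size.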
 
 /* number of ETHTOOL_GSTATS u64's */
 #define TG3_NUM_STATS          (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 
+#define TG3_NUM_TEST           6
+
 static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
 MODULE_LICENSE("GPL");
-MODULE_PARM(tg3_debug, "i");
-MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+MODULE_VERSION(DRV_MODULE_VERSION);
 
 static int tg3_debug = -1;     /* -1 == use TG3_DEF_MSG_ENABLE as value */
+module_param(tg3_debug, int, 0);
+MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
 
 static struct pci_device_id tg3_pci_tbl[] = {
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { 0, }
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+       {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+       {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+       {}
 };
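Each entry above relies on the PCI_DEVICE() helper, which fills in the vendor and device IDs and wildcards the subsystem IDs through designated initializers, so one table line replaces the old five-field form. A stand-in illustration (struct and macro renamed here; the real struct pci_device_id and PCI_DEVICE() come from the kernel's PCI headers):

#include <stdio.h>

#define ANY_ID 0xffffffffu

struct id_entry {                         /* stand-in for struct pci_device_id */
        unsigned int vendor, device, subvendor, subdevice;
};

/* Stand-in for PCI_DEVICE(vend, dev): name the two interesting fields,
 * wildcard the rest. */
#define ID(vend, dev) \
        { .vendor = (vend), .device = (dev), \
          .subvendor = ANY_ID, .subdevice = ANY_ID }

static const struct id_entry tbl[] = {
        ID(0x14e4, 0x1644),               /* illustrative vendor/device pair */
        { 0 }                             /* empty terminator, like the trailing {} above */
};

int main(void)
{
        printf("%#x:%#x\n", tbl[0].vendor, tbl[0].device);
        return 0;
}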
 
 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
 
-struct {
-       char string[ETH_GSTRING_LEN];
+static const struct {
+       const char string[ETH_GSTRING_LEN];
 } ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
@@ -302,49 +295,133 @@ struct {
        { "nic_tx_threshold_hit" }
 };
 
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[TG3_NUM_TEST] = {
+       { "nvram test     (online) " },
+       { "link test      (online) " },
+       { "register test  (offline)" },
+       { "memory test    (offline)" },
+       { "loopback test  (offline)" },
+       { "interrupt test (offline)" },
+};
+
+static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
+{
+       writel(val, tp->regs + off);
+}
+
+static u32 tg3_read32(struct tg3 *tp, u32 off)
+{
+       return (readl(tp->regs + off));
+}
+
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
-       if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-               unsigned long flags;
+       unsigned long flags;
 
-               spin_lock_irqsave(&tp->indirect_lock, flags);
-               pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
-               pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-               spin_unlock_irqrestore(&tp->indirect_lock, flags);
-       } else {
-               writel(val, tp->regs + off);
-               if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
-                       readl(tp->regs + off);
-       }
+       spin_lock_irqsave(&tp->indirect_lock, flags);
+       pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+       pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+       spin_unlock_irqrestore(&tp->indirect_lock, flags);
 }
 
-static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
+static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 {
-       if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-               unsigned long flags;
+       writel(val, tp->regs + off);
+       readl(tp->regs + off);
+}
 
-               spin_lock_irqsave(&tp->indirect_lock, flags);
-               pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
-               pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-               spin_unlock_irqrestore(&tp->indirect_lock, flags);
-       } else {
-               unsigned long dest = tp->regs + off;
-               writel(val, dest);
-               readl(dest);    /* always flush PCI write */
+static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
+{
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&tp->indirect_lock, flags);
+       pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+       pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+       spin_unlock_irqrestore(&tp->indirect_lock, flags);
+       return val;
+}
+
+static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+       unsigned long flags;
+
+       if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
+               pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
+                                      TG3_64BIT_REG_LOW, val);
+               return;
+       }
+       if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
+               pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
+                                      TG3_64BIT_REG_LOW, val);
+               return;
+       }
+
+       spin_lock_irqsave(&tp->indirect_lock, flags);
+       pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+       pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+       spin_unlock_irqrestore(&tp->indirect_lock, flags);
+
+       /* In indirect mode when disabling interrupts, we also need
+        * to clear the interrupt bit in the GRC local ctrl register.
+        */
+       if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
+           (val == 0x1)) {
+               pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
+                                      tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
 }
 
-static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
+static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
 {
-       unsigned long mbox = tp->regs + off;
-       writel(val, mbox);
-       if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
-               readl(mbox);
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&tp->indirect_lock, flags);
+       pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+       pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+       spin_unlock_irqrestore(&tp->indirect_lock, flags);
+       return val;
+}
+
+/* usec_wait specifies the wait time in usec when writing to certain registers
+ * where it is unsafe to read back the register without some delay.
+ * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
+ * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
+ */
+static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
+{
+       if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
+           (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
+               /* Non-posted methods */
+               tp->write32(tp, off, val);
+       else {
+               /* Posted method */
+               tg3_write32(tp, off, val);
+               if (usec_wait)
+                       udelay(usec_wait);
+               tp->read32(tp, off);
+       }
+       /* Wait again after the read for the posted method to guarantee that
+        * the wait time is met.
+        */
+       if (usec_wait)
+               udelay(usec_wait);
+}
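The usec_wait comment above concerns posted PCI writes: a bare writel() can linger in a bridge's write buffer, so the driver reads the register back to force it out, and for registers like GRC_LOCAL_CTRL and TG3PCI_CLOCK_CTRL it also waits before and after that flush. A toy user-space model of the posted branch only; reg_write/reg_read/wait_us are stand-ins, not the kernel MMIO API:

#include <stdio.h>

/* Model of a posted write: reg_write() merely stages the value,
 * reg_read() forces anything staged to "reach the device", the way the
 * read-back of the same register does in the driver. */
static unsigned int device_reg;     /* what the device actually sees */
static unsigned int staged_val;     /* write sitting in the buffer   */
static int          staged;

static void reg_write(unsigned int val) { staged_val = val; staged = 1; }

static unsigned int reg_read(void)
{
        if (staged) {               /* the read flushes the posted write */
                device_reg = staged_val;
                staged = 0;
        }
        return device_reg;
}

static void wait_us(unsigned int us) { (void)us; /* placeholder for udelay() */ }

/* Shape of _tw32_flush()'s posted branch: write, optional wait,
 * read-back to flush, then wait again so the full delay is honored. */
static void write_flush_wait(unsigned int val, unsigned int usec_wait)
{
        reg_write(val);
        if (usec_wait)
                wait_us(usec_wait);
        reg_read();
        if (usec_wait)
                wait_us(usec_wait);
}

int main(void)
{
        write_flush_wait(0x5a, 40);
        printf("device sees %#x\n", device_reg);
        return 0;
}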
+
+static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
+{
+       tp->write32_mbox(tp, off, val);
+       if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
+           !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
+               tp->read32_mbox(tp, off);
 }
 
-static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
+static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 {
-       unsigned long mbox = tp->regs + off;
+       void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
@@ -352,28 +429,49 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
                readl(mbox);
 }
 
-#define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
-#define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
-#define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
+static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
+{
+       return (readl(tp->regs + off + GRCMBOX_BASE));
+}
+
+static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
+{
+       writel(val, tp->regs + off + GRCMBOX_BASE);
+}
+
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox_f(reg, val)       tw32_mailbox_flush(tp, (reg), (val))
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg)      tp->read32_mbox(tp, reg)
 
-#define tw32(reg,val)          tg3_write_indirect_reg32(tp,(reg),(val))
-#define tw32_f(reg,val)                _tw32_flush(tp,(reg),(val))
-#define tw16(reg,val)          writew(((val) & 0xffff), tp->regs + (reg))
-#define tw8(reg,val)           writeb(((val) & 0xff), tp->regs + (reg))
-#define tr32(reg)              readl(tp->regs + (reg))
-#define tr16(reg)              readw(tp->regs + (reg))
-#define tr8(reg)               readb(tp->regs + (reg))
+#define tw32(reg,val)          tp->write32(tp, reg, val)
+#define tw32_f(reg,val)                _tw32_flush(tp,(reg),(val), 0)
+#define tw32_wait_f(reg,val,us)        _tw32_flush(tp,(reg),(val), (us))
+#define tr32(reg)              tp->read32(tp, reg)
 
 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
        unsigned long flags;
 
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
+           (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
+               return;
+
        spin_lock_irqsave(&tp->indirect_lock, flags);
-       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
-       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+       if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
+               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
-       /* Always leave this as zero. */
-       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+               /* Always leave this as zero. */
+               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+       } else {
+               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+               tw32_f(TG3PCI_MEM_WIN_DATA, val);
+
+               /* Always leave this as zero. */
+               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+       }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
 }
 
@@ -381,12 +479,26 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
        unsigned long flags;
 
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
+           (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
+               *val = 0;
+               return;
+       }
+
        spin_lock_irqsave(&tp->indirect_lock, flags);
-       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
-       pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+       if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
+               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+               pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
-       /* Always leave this as zero. */
-       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+               /* Always leave this as zero. */
+               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+       } else {
+               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+               *val = tr32(TG3PCI_MEM_WIN_DATA);
+
+               /* Always leave this as zero. */
+               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+       }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
 }
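tg3_write_mem() and tg3_read_mem() above use a register-window scheme: one register (the MEM_WIN base address) selects an offset in NIC-internal SRAM, a second (the MEM_WIN data register) moves the data, and the window is parked back at zero afterwards; the indirect_lock exists because the select/access pair must not be interleaved by another context. A minimal stand-alone model of the idea, with an invented array in place of the NIC's SRAM:

#include <stdio.h>

#define SRAM_WORDS 1024

static unsigned int sram[SRAM_WORDS];   /* pretend NIC-internal memory    */
static unsigned int win_base;           /* "window base address" register */

/* The "window data" register reads or writes sram[win_base / 4]. */
static void win_data_write(unsigned int val) { sram[win_base / 4] = val; }
static unsigned int win_data_read(void)      { return sram[win_base / 4]; }

static void mem_write(unsigned int off, unsigned int val)
{
        win_base = off;                  /* select the SRAM word      */
        win_data_write(val);             /* move the data through it  */
        win_base = 0;                    /* always leave this as zero */
}

static unsigned int mem_read(unsigned int off)
{
        unsigned int val;

        win_base = off;
        val = win_data_read();
        win_base = 0;
        return val;
}

int main(void)
{
        mem_write(0x40, 0x1234abcd);
        printf("%#x\n", mem_read(0x40));
        return 0;
}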
 
@@ -394,28 +506,78 @@ static void tg3_disable_ints(struct tg3 *tp)
 {
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
-       tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 }
 
 static inline void tg3_cond_int(struct tg3 *tp)
 {
-       if (tp->hw_status->status & SD_STATUS_UPDATED)
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+           (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+       else
+               tw32(HOSTCC_MODE, tp->coalesce_mode |
+                    (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
 
 static void tg3_enable_ints(struct tg3 *tp)
 {
+       tp->irq_sync = 0;
+       wmb();
+
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
-       tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
-
+       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                      (tp->last_tag << 24));
+       if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+               tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                              (tp->last_tag << 24));
        tg3_cond_int(tp);
 }
 
+static inline unsigned int tg3_has_work(struct tg3 *tp)
+{
+       struct tg3_hw_status *sblk = tp->hw_status;
+       unsigned int work_exists = 0;
+
+       /* check for phy events */
+       if (!(tp->tg3_flags &
+             (TG3_FLAG_USE_LINKCHG_REG |
+              TG3_FLAG_POLL_SERDES))) {
+               if (sblk->status & SD_STATUS_LINK_CHG)
+                       work_exists = 1;
+       }
+       /* check for RX/TX work to do */
+       if (sblk->idx[0].tx_consumer != tp->tx_cons ||
+           sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+               work_exists = 1;
+
+       return work_exists;
+}
+
+/* tg3_restart_ints
+ *  similar to tg3_enable_ints, but it accurately determines whether there
+ *  is new work pending and can return without flushing the PIO write
+ *  which reenables interrupts
+ */
+static void tg3_restart_ints(struct tg3 *tp)
+{
+       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                    tp->last_tag << 24);
+       mmiowb();
+
+       /* When doing tagged status, this work check is unnecessary.
+        * The last_tag we write above tells the chip which piece of
+        * work we've completed.
+        */
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+           tg3_has_work(tp))
+               tw32(HOSTCC_MODE, tp->coalesce_mode |
+                    (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+}
+
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
+       tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
 }
@@ -428,7 +590,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
-       tg3_cond_int(tp);
+       tp->hw_status->status |= SD_STATUS_UPDATED;
+       tg3_enable_ints(tp);
 }
 
 static void tg3_switch_clocks(struct tg3 *tp)
@@ -436,25 +599,30 @@ static void tg3_switch_clocks(struct tg3 *tp)
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;
 
+       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+               return;
+
        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
-           (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
-               tw32_f(TG3PCI_CLOCK_CTRL,
-                    clock_ctrl |
-                    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
-               udelay(40);
-               tw32_f(TG3PCI_CLOCK_CTRL,
-                    clock_ctrl | (CLOCK_CTRL_ALTCLK));
-               udelay(40);
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
+                       tw32_wait_f(TG3PCI_CLOCK_CTRL,
+                                   clock_ctrl | CLOCK_CTRL_625_CORE, 40);
+               }
+       } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
+               tw32_wait_f(TG3PCI_CLOCK_CTRL,
+                           clock_ctrl |
+                           (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
+                           40);
+               tw32_wait_f(TG3PCI_CLOCK_CTRL,
+                           clock_ctrl | (CLOCK_CTRL_ALTCLK),
+                           40);
        }
-       tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
-       udelay(40);
+       tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 }
 
 #define PHY_BUSY_LOOPS 5000
@@ -462,7 +630,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
 {
        u32 frame_val;
-       int loops, ret;
+       unsigned int loops;
+       int ret;
 
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
@@ -470,18 +639,18 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
                udelay(80);
        }
 
-       *val = 0xffffffff;
+       *val = 0x0;
 
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);
-       
+
        tw32_f(MAC_MI_COM, frame_val);
 
        loops = PHY_BUSY_LOOPS;
-       while (loops-- > 0) {
+       while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
 
@@ -490,10 +659,11 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
+               loops -= 1;
        }
 
        ret = -EBUSY;
-       if (loops > 0) {
+       if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }
@@ -509,7 +679,12 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
 {
        u32 frame_val;
-       int loops, ret;
+       unsigned int loops;
+       int ret;
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+           (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
+               return 0;
 
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
@@ -523,11 +698,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
-       
+
        tw32_f(MAC_MI_COM, frame_val);
 
        loops = PHY_BUSY_LOOPS;
-       while (loops-- > 0) {
+       while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
@@ -535,10 +710,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
+               loops -= 1;
        }
 
        ret = -EBUSY;
-       if (loops > 0)
+       if (loops != 0)
                ret = 0;
 
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
@@ -556,9 +732,10 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp)
        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;
 
-       tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
-       tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
-       tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
+       if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
+           !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
+               tg3_writephy(tp, MII_TG3_AUX_CTRL,
+                            (val | (1 << 15) | (1 << 4)));
 }
 
 static int tg3_bmcr_reset(struct tg3 *tp)
@@ -599,9 +776,10 @@ static int tg3_wait_macro_done(struct tg3 *tp)
        while (limit--) {
                u32 tmp32;
 
-               tg3_readphy(tp, 0x16, &tmp32);
-               if ((tmp32 & 0x1000) == 0)
-                       break;
+               if (!tg3_readphy(tp, 0x16, &tmp32)) {
+                       if ((tmp32 & 0x1000) == 0)
+                               break;
+               }
        }
        if (limit <= 0)
                return -EBUSY;
@@ -653,9 +831,9 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;
 
-                       tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
-                       tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
-                       if (tg3_wait_macro_done(tp)) {
+                       if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
+                           tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
+                           tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
@@ -711,7 +889,9 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
                }
 
                /* Disable transmitter and interrupt.  */
-               tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+               if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
+                       continue;
+
                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 
@@ -720,7 +900,9 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
 
                /* Set to master mode.  */
-               tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
+               if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
+                       continue;
+
                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));
@@ -758,13 +940,17 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
 
-       tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
-       reg32 &= ~0x3000;
-       tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+       if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
+               reg32 &= ~0x3000;
+               tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+       } else if (!err)
+               err = -EBUSY;
 
        return err;
 }
 
+static void tg3_link_report(struct tg3 *);
+
 /* This will reset the tigon3 PHY if there is no valid
  * link unless the FORCE argument is non-zero.
  */
@@ -773,11 +959,23 @@ static int tg3_phy_reset(struct tg3 *tp)
        u32 phy_status;
        int err;
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               u32 val;
+
+               val = tr32(GRC_MISC_CFG);
+               tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
+               udelay(40);
+       }
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;
 
+       if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
+               netif_carrier_off(tp->dev);
+               tg3_link_report(tp);
+       }
+
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -814,20 +1012,60 @@ out:
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
+       else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+               if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
+                       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+                       tg3_writephy(tp, MII_TG3_TEST1,
+                                    MII_TG3_TEST1_TRIM_EN | 0x4);
+               } else
+                       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+       }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
-       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+       } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;
 
                /* Set bit 14 with read-modify-write to preserve other bits */
-               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007);
-               tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg);
-               tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
+               if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
+                   !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
+                       tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
+       }
+
+       /* Set phy register 0x10 bit 0 to high fifo elasticity to support
+        * jumbo frames transmission.
+        */
+       if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
+               u32 phy_reg;
+
+               if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
+                   tg3_writephy(tp, MII_TG3_EXT_CTRL,
+                                phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+       }
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               u32 phy_reg;
+
+               /* adjust output voltage */
+               tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
+
+               if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
+                       u32 phy_reg2;
+
+                       tg3_writephy(tp, MII_TG3_EPHY_TEST,
+                                    phy_reg | MII_TG3_EPHY_SHADOW_EN);
+                       /* Enable auto-MDIX */
+                       if (!tg3_readphy(tp, 0x10, &phy_reg2))
+                               tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
+                       tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
+               }
        }
+
        tg3_phy_set_wirespeed(tp);
        return 0;
 }
@@ -836,56 +1074,76 @@ static void tg3_frob_aux_power(struct tg3 *tp)
 {
        struct tg3 *tp_peer = tp;
 
-       if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
+       if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
-               tp_peer = pci_get_drvdata(tp->pdev_peer);
-               if (!tp_peer)
-                       BUG();
-       }
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
+               struct net_device *dev_peer;
 
+               dev_peer = pci_get_drvdata(tp->pdev_peer);
+               /* remove_one() may have been run on the peer. */
+               if (!dev_peer)
+                       tp_peer = tp;
+               else
+                       tp_peer = netdev_priv(dev_peer);
+       }
 
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-           (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
+           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
+           (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
+           (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE0 |
-                             GRC_LCLCTRL_GPIO_OE1 |
-                             GRC_LCLCTRL_GPIO_OE2 |
-                             GRC_LCLCTRL_GPIO_OUTPUT0 |
-                             GRC_LCLCTRL_GPIO_OUTPUT1));
-                       udelay(100);
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                   (GRC_LCLCTRL_GPIO_OE0 |
+                                    GRC_LCLCTRL_GPIO_OE1 |
+                                    GRC_LCLCTRL_GPIO_OE2 |
+                                    GRC_LCLCTRL_GPIO_OUTPUT0 |
+                                    GRC_LCLCTRL_GPIO_OUTPUT1),
+                                   100);
                } else {
+                       u32 no_gpio2;
+                       u32 grc_local_ctrl = 0;
+
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;
 
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE0 |
-                             GRC_LCLCTRL_GPIO_OE1 |
-                             GRC_LCLCTRL_GPIO_OE2 |
-                             GRC_LCLCTRL_GPIO_OUTPUT1 |
-                             GRC_LCLCTRL_GPIO_OUTPUT2));
-                       udelay(100);
+                       /* Workaround to prevent overdrawing Amps. */
+                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                           ASIC_REV_5714) {
+                               grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+                               tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                           grc_local_ctrl, 100);
+                       }
 
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE0 |
-                             GRC_LCLCTRL_GPIO_OE1 |
-                             GRC_LCLCTRL_GPIO_OE2 |
-                             GRC_LCLCTRL_GPIO_OUTPUT0 |
-                             GRC_LCLCTRL_GPIO_OUTPUT1 |
-                             GRC_LCLCTRL_GPIO_OUTPUT2));
-                       udelay(100);
+                       /* On 5753 and variants, GPIO2 cannot be used. */
+                       no_gpio2 = tp->nic_sram_data_cfg &
+                                   NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+                       grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+                                        GRC_LCLCTRL_GPIO_OE1 |
+                                        GRC_LCLCTRL_GPIO_OE2 |
+                                        GRC_LCLCTRL_GPIO_OUTPUT1 |
+                                        GRC_LCLCTRL_GPIO_OUTPUT2;
+                       if (no_gpio2) {
+                               grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+                                                   GRC_LCLCTRL_GPIO_OUTPUT2);
+                       }
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                                   grc_local_ctrl, 100);
 
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE0 |
-                             GRC_LCLCTRL_GPIO_OE1 |
-                             GRC_LCLCTRL_GPIO_OE2 |
-                             GRC_LCLCTRL_GPIO_OUTPUT0 |
-                             GRC_LCLCTRL_GPIO_OUTPUT1));
-                       udelay(100);
+                       grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                                   grc_local_ctrl, 100);
+
+                       if (!no_gpio2) {
+                               grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+                               tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                           grc_local_ctrl, 100);
+                       }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
@@ -894,19 +1152,16 @@ static void tg3_frob_aux_power(struct tg3 *tp)
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;
 
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE1 |
-                             GRC_LCLCTRL_GPIO_OUTPUT1));
-                       udelay(100);
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                   (GRC_LCLCTRL_GPIO_OE1 |
+                                    GRC_LCLCTRL_GPIO_OUTPUT1), 100);
 
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE1));
-                       udelay(100);
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                   GRC_LCLCTRL_GPIO_OE1, 100);
 
-                       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-                            (GRC_LCLCTRL_GPIO_OE1 |
-                             GRC_LCLCTRL_GPIO_OUTPUT1));
-                       udelay(100);
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+                                   (GRC_LCLCTRL_GPIO_OE1 |
+                                    GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
 }
@@ -918,8 +1173,41 @@ static int tg3_setup_phy(struct tg3 *, int);
 #define RESET_KIND_SUSPEND     2
 
 static void tg3_write_sig_post_reset(struct tg3 *, int);
+static int tg3_halt_cpu(struct tg3 *, u32);
+static int tg3_nvram_lock(struct tg3 *);
+static void tg3_nvram_unlock(struct tg3 *);
+
+static void tg3_power_down_phy(struct tg3 *tp)
+{
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+               return;
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               u32 val;
+
+               tg3_bmcr_reset(tp);
+               val = tr32(GRC_MISC_CFG);
+               tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
+               udelay(40);
+               return;
+       } else {
+               tg3_writephy(tp, MII_TG3_EXT_CTRL,
+                            MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
+       }
+
+       /* The PHY should not be powered down on some chips because
+        * of bugs.
+        */
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+            (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+               return;
+       tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
 
-static int tg3_set_power_state(struct tg3 *tp, int state)
+static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
 {
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
@@ -938,25 +1226,28 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
-       case 0:
+       case PCI_D0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
-               tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
-               udelay(100);
+               udelay(100);    /* Delay after power state change */
+
+               /* Switch out of Vaux if it is a NIC */
+               if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 
                return 0;
 
-       case 1:
+       case PCI_D1:
                power_control |= 1;
                break;
 
-       case 2:
+       case PCI_D2:
                power_control |= 2;
                break;
 
-       case 3:
+       case PCI_D3hot:
                power_control |= 3;
                break;
 
@@ -980,23 +1271,46 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }
 
-       if (tp->phy_id != PHY_ID_SERDES) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               u32 val;
+
+               val = tr32(GRC_VCPU_EXT_CTRL);
+               tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
+       } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+               int i;
+               u32 val;
+
+               for (i = 0; i < 200; i++) {
+                       tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
+                       if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+                               break;
+                       msleep(1);
+               }
+       }
+       tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+                                            WOL_DRV_STATE_SHUTDOWN |
+                                            WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
+
        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
 
        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;
 
-               if (tp->phy_id != PHY_ID_SERDES) {
+               if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);
 
-                       mac_mode = MAC_MODE_PORT_MODE_MII;
+                       if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+                               mac_mode = MAC_MODE_PORT_MODE_GMII;
+                       else
+                               mac_mode = MAC_MODE_PORT_MODE_MII;
 
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
@@ -1005,7 +1319,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
+               if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);
 
                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
@@ -1028,11 +1342,11 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);
 
-               tw32_f(TG3PCI_CLOCK_CTRL, base_val |
-                    CLOCK_CTRL_ALTCLK |
-                    CLOCK_CTRL_PWRDOWN_PLL133);
-               udelay(40);
-       } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == 5750) &&
+               tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
+                           CLOCK_CTRL_PWRDOWN_PLL133, 40);
+       } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+               /* do nothing */
+       } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;
 
@@ -1042,8 +1356,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
-               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+               } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
@@ -1051,14 +1364,13 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }
 
-               tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
-               udelay(40);
+               tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
+                           40);
 
-               tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
-               udelay(40);
+               tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
+                           40);
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+               if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;
 
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -1070,27 +1382,50 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }
 
-                       tw32_f(TG3PCI_CLOCK_CTRL,
-                                        tp->pci_clock_ctrl | newbits3);
-                       udelay(40);
+                       tw32_wait_f(TG3PCI_CLOCK_CTRL,
+                                   tp->pci_clock_ctrl | newbits3, 40);
                }
        }
 
+       if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
+           !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+               tg3_power_down_phy(tp);
+
        tg3_frob_aux_power(tp);
 
-       /* Finally, set the new power state. */
-       pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
+       /* Workaround for unstable PLL clock */
+       if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
+           (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+               u32 val = tr32(0x7d00);
+
+               val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
+               tw32(0x7d00, val);
+               if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+                       int err;
+
+                       err = tg3_nvram_lock(tp);
+                       tg3_halt_cpu(tp, RX_CPU_BASE);
+                       if (!err)
+                               tg3_nvram_unlock(tp);
+               }
+       }
 
        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
 
+       /* Finally, set the new power state. */
+       pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
+       udelay(100);    /* Delay after power state change */
+
        return 0;
 }
 
 static void tg3_link_report(struct tg3 *tp)
 {
        if (!netif_carrier_ok(tp->dev)) {
-               printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
-       } else {
+               if (netif_msg_link(tp))
+                       printk(KERN_INFO PFX "%s: Link is down.\n",
+                              tp->dev->name);
+       } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
@@ -1114,29 +1449,52 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;
 
-       if (local_adv & ADVERTISE_PAUSE_CAP) {
-               if (local_adv & ADVERTISE_PAUSE_ASYM) {
-                       if (remote_adv & LPA_PAUSE_CAP)
-                               new_tg3_flags |=
-                                       (TG3_FLAG_RX_PAUSE |
-                                        TG3_FLAG_TX_PAUSE);
-                       else if (remote_adv & LPA_PAUSE_ASYM)
-                               new_tg3_flags |=
-                                       (TG3_FLAG_RX_PAUSE);
-               } else {
-                       if (remote_adv & LPA_PAUSE_CAP)
-                               new_tg3_flags |=
-                                       (TG3_FLAG_RX_PAUSE |
-                                        TG3_FLAG_TX_PAUSE);
+       if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
+
+               /* Convert 1000BaseX flow control bits to 1000BaseT
+                * bits before resolving flow control.
+                */
+               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+                       local_adv &= ~(ADVERTISE_PAUSE_CAP |
+                                      ADVERTISE_PAUSE_ASYM);
+                       remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
+
+                       if (local_adv & ADVERTISE_1000XPAUSE)
+                               local_adv |= ADVERTISE_PAUSE_CAP;
+                       if (local_adv & ADVERTISE_1000XPSE_ASYM)
+                               local_adv |= ADVERTISE_PAUSE_ASYM;
+                       if (remote_adv & LPA_1000XPAUSE)
+                               remote_adv |= LPA_PAUSE_CAP;
+                       if (remote_adv & LPA_1000XPAUSE_ASYM)
+                               remote_adv |= LPA_PAUSE_ASYM;
+               }
+
+               if (local_adv & ADVERTISE_PAUSE_CAP) {
+                       if (local_adv & ADVERTISE_PAUSE_ASYM) {
+                               if (remote_adv & LPA_PAUSE_CAP)
+                                       new_tg3_flags |=
+                                               (TG3_FLAG_RX_PAUSE |
+                                               TG3_FLAG_TX_PAUSE);
+                               else if (remote_adv & LPA_PAUSE_ASYM)
+                                       new_tg3_flags |=
+                                               (TG3_FLAG_RX_PAUSE);
+                       } else {
+                               if (remote_adv & LPA_PAUSE_CAP)
+                                       new_tg3_flags |=
+                                               (TG3_FLAG_RX_PAUSE |
+                                               TG3_FLAG_TX_PAUSE);
+                       }
+               } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
+                       if ((remote_adv & LPA_PAUSE_CAP) &&
+                           (remote_adv & LPA_PAUSE_ASYM))
+                               new_tg3_flags |= TG3_FLAG_TX_PAUSE;
                }
-       } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
-               if ((remote_adv & LPA_PAUSE_CAP) &&
-                   (remote_adv & LPA_PAUSE_ASYM))
-                       new_tg3_flags |= TG3_FLAG_TX_PAUSE;
-       }
 
-       tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
-       tp->tg3_flags |= new_tg3_flags;
+               tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
+               tp->tg3_flags |= new_tg3_flags;
+       } else {
+               new_tg3_flags = tp->tg3_flags;
+       }
 
        if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
@@ -1146,7 +1504,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv
        if (old_rx_mode != tp->rx_mode) {
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }
-       
+
        if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
@@ -1191,13 +1549,20 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
                break;
 
        default:
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                       *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
+                                SPEED_10;
+                       *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
+                                 DUPLEX_HALF;
+                       break;
+               }
                *speed = SPEED_INVALID;
                *duplex = DUPLEX_INVALID;
                break;
        };
 }
 
-static int tg3_phy_copper_begin(struct tg3 *tp)
+static void tg3_phy_copper_begin(struct tg3 *tp)
 {
        u32 new_adv;
        int i;
@@ -1215,12 +1580,6 @@ static int tg3_phy_copper_begin(struct tg3 *tp)
 
                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
-               tp->link_config.advertising =
-                       (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                        ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
-                        ADVERTISED_Autoneg | ADVERTISED_MII);
-
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
@@ -1312,15 +1671,16 @@ static int tg3_phy_copper_begin(struct tg3 *tp)
                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
 
-               tg3_readphy(tp, MII_BMCR, &orig_bmcr);
-               if (bmcr != orig_bmcr) {
+               if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
+                   (bmcr != orig_bmcr)) {
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;
 
                                udelay(10);
-                               tg3_readphy(tp, MII_BMSR, &tmp);
-                               tg3_readphy(tp, MII_BMSR, &tmp);
+                               if (tg3_readphy(tp, MII_BMSR, &tmp) ||
+                                   tg3_readphy(tp, MII_BMSR, &tmp))
+                                       continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
@@ -1333,8 +1693,6 @@ static int tg3_phy_copper_begin(struct tg3 *tp)
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
-
-       return 0;
 }
 
 static int tg3_init_5401phy_dsp(struct tg3 *tp)
@@ -1365,21 +1723,36 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
        return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp)
+static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 {
-       u32 adv_reg, all_mask;
+       u32 adv_reg, all_mask = 0;
+
+       if (mask & ADVERTISED_10baseT_Half)
+               all_mask |= ADVERTISE_10HALF;
+       if (mask & ADVERTISED_10baseT_Full)
+               all_mask |= ADVERTISE_10FULL;
+       if (mask & ADVERTISED_100baseT_Half)
+               all_mask |= ADVERTISE_100HALF;
+       if (mask & ADVERTISED_100baseT_Full)
+               all_mask |= ADVERTISE_100FULL;
+
+       if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
+               return 0;
 
-       tg3_readphy(tp, MII_ADVERTISE, &adv_reg);
-       all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
-                   ADVERTISE_100HALF | ADVERTISE_100FULL);
        if ((adv_reg & all_mask) != all_mask)
                return 0;
        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
-               tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl);
-               all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
-                           MII_TG3_CTRL_ADV_1000_FULL);
+               all_mask = 0;
+               if (mask & ADVERTISED_1000baseT_Half)
+                       all_mask |= ADVERTISE_1000HALF;
+               if (mask & ADVERTISED_1000baseT_Full)
+                       all_mask |= ADVERTISE_1000FULL;
+
+               if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
+                       return 0;
+
                if ((tg3_ctrl & all_mask) != all_mask)
                        return 0;
        }
@@ -1417,8 +1790,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
-               tg3_readphy(tp, MII_BMSR, &bmsr);
-               if (!(bmsr & BMSR_LSTATUS))
+               if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+                   !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
@@ -1426,9 +1799,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
-               tg3_readphy(tp, MII_BMSR, &bmsr);
-
-               if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
+               if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
+                   !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
                        bmsr = 0;
 
                if (!(bmsr & BMSR_LSTATUS)) {
@@ -1439,8 +1811,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
-                               tg3_readphy(tp, MII_BMSR, &bmsr);
-                               if (bmsr & BMSR_LSTATUS) {
+                               if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+                                   (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
@@ -1471,7 +1843,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
-       else
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
                tg3_writephy(tp, MII_TG3_IMASK, ~0);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -1487,11 +1859,23 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
 
+       if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
+               u32 val;
+
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
+               tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
+               if (!(val & (1 << 10))) {
+                       val |= (1 << 10);
+                       tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+                       goto relink;
+               }
+       }
+
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
-               tg3_readphy(tp, MII_BMSR, &bmsr);
-               if (bmsr & BMSR_LSTATUS)
+               if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+                   (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }
@@ -1502,8 +1886,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
-                       tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
-                       if (aux_stat)
+                       if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+                           aux_stat)
                                break;
                }
 
@@ -1514,7 +1898,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
-                       tg3_readphy(tp, MII_BMCR, &bmcr);
+                       if (tg3_readphy(tp, MII_BMCR, &bmcr))
+                               continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
@@ -1527,7 +1912,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
-                               if (!tg3_copper_is_advertising_all(tp))
+                               if (!tg3_copper_is_advertising_all(tp,
+                                               tp->link_config.advertising))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
@@ -1551,10 +1937,13 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 local_adv, remote_adv;
 
-               tg3_readphy(tp, MII_ADVERTISE, &local_adv);
+               if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
+                       local_adv = 0;
                local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 
-               tg3_readphy(tp, MII_LPA, &remote_adv);
+               if (tg3_readphy(tp, MII_LPA, &remote_adv))
+                       remote_adv = 0;
+
                remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
 
                /* If we are not advertising full pause capability,
@@ -1566,15 +1955,15 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                }
        }
-
-       if (current_link_up == 0) {
+relink:
+       if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
                u32 tmp;
 
                tg3_phy_copper_begin(tp);
 
                tg3_readphy(tp, MII_BMSR, &tmp);
-               tg3_readphy(tp, MII_BMSR, &tmp);
-               if (tmp & BMSR_LSTATUS)
+               if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
+                   (tmp & BMSR_LSTATUS))
                        current_link_up = 1;
        }
 
@@ -1616,7 +2005,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);
 
-       if (tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) {
+       if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
@@ -1965,62 +2354,290 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
 {
        int res = 0;
+       struct tg3_fiber_aneginfo aninfo;
+       int status = ANEG_FAILED;
+       unsigned int tick;
+       u32 tmp;
 
-       if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
-               u32 dig_status;
+       tw32_f(MAC_TX_AUTO_NEG, 0);
 
-               dig_status = tr32(SG_DIG_STATUS);
-               *flags = 0;
-               if (dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
-                       *flags |= MR_LP_ADV_ASYM_PAUSE;
-               if (dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
-                       *flags |= MR_LP_ADV_SYM_PAUSE;
+       tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+       tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
+       udelay(40);
 
-               if ((dig_status & SG_DIG_AUTONEG_COMPLETE) &&
-                   !(dig_status & (SG_DIG_AUTONEG_ERROR |
-                                   SG_DIG_PARTNER_FAULT_MASK)))
-                       res = 1;
-       } else {
-               struct tg3_fiber_aneginfo aninfo;
-               int status = ANEG_FAILED;
-               unsigned int tick;
-               u32 tmp;
+       tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
+       udelay(40);
 
-               tw32_f(MAC_TX_AUTO_NEG, 0);
+       memset(&aninfo, 0, sizeof(aninfo));
+       aninfo.flags |= MR_AN_ENABLE;
+       aninfo.state = ANEG_STATE_UNKNOWN;
+       aninfo.cur_time = 0;
+       tick = 0;
+       while (++tick < 195000) {
+               status = tg3_fiber_aneg_smachine(tp, &aninfo);
+               if (status == ANEG_DONE || status == ANEG_FAILED)
+                       break;
 
-               tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
-               tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
-               udelay(40);
+               udelay(1);
+       }
 
-               tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
-               udelay(40);
+       tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+       tw32_f(MAC_MODE, tp->mac_mode);
+       udelay(40);
 
-               memset(&aninfo, 0, sizeof(aninfo));
-               aninfo.flags |= MR_AN_ENABLE;
-               aninfo.state = ANEG_STATE_UNKNOWN;
-               aninfo.cur_time = 0;
-               tick = 0;
-               while (++tick < 195000) {
-                       status = tg3_fiber_aneg_smachine(tp, &aninfo);
-                       if (status == ANEG_DONE || status == ANEG_FAILED)
-                               break;
+       *flags = aninfo.flags;
 
-                       udelay(1);
+       if (status == ANEG_DONE &&
+           (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
+                            MR_LP_ADV_FULL_DUPLEX)))
+               res = 1;
+
+       return res;
+}
+
+static void tg3_init_bcm8002(struct tg3 *tp)
+{
+       u32 mac_status = tr32(MAC_STATUS);
+       int i;
+
+       /* Reset when initializing for the first time or when we have a link. */
+       if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
+           !(mac_status & MAC_STATUS_PCS_SYNCED))
+               return;
+
+       /* Set PLL lock range. */
+       tg3_writephy(tp, 0x16, 0x8007);
+
+       /* SW reset */
+       tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+
+       /* Wait for reset to complete. */
+       /* XXX schedule_timeout() ... */
+       for (i = 0; i < 500; i++)
+               udelay(10);
+
+       /* Config mode; select PMA/Ch 1 regs. */
+       tg3_writephy(tp, 0x10, 0x8411);
+
+       /* Enable auto-lock and comdet, select txclk for tx. */
+       tg3_writephy(tp, 0x11, 0x0a10);
+
+       tg3_writephy(tp, 0x18, 0x00a0);
+       tg3_writephy(tp, 0x16, 0x41ff);
+
+       /* Assert and deassert POR. */
+       tg3_writephy(tp, 0x13, 0x0400);
+       udelay(40);
+       tg3_writephy(tp, 0x13, 0x0000);
+
+       tg3_writephy(tp, 0x11, 0x0a50);
+       udelay(40);
+       tg3_writephy(tp, 0x11, 0x0a10);
+
+       /* Wait for signal to stabilize */
+       /* XXX schedule_timeout() ... */
+       for (i = 0; i < 15000; i++)
+               udelay(10);
+
+       /* Deselect the channel register so we can read the PHYID
+        * later.
+        */
+       tg3_writephy(tp, 0x10, 0x8011);
+}
+
+static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+{
+       u32 sg_dig_ctrl, sg_dig_status;
+       u32 serdes_cfg, expected_sg_dig_ctrl;
+       int workaround, port_a;
+       int current_link_up;
+
+       serdes_cfg = 0;
+       expected_sg_dig_ctrl = 0;
+       workaround = 0;
+       port_a = 1;
+       current_link_up = 0;
+
+       if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
+           tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+               workaround = 1;
+               if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+                       port_a = 0;
+
+               /* preserve bits 0-11,13,14 for signal pre-emphasis */
+               /* preserve bits 20-23 for voltage regulator */
+               serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
+       }
+
+       sg_dig_ctrl = tr32(SG_DIG_CTRL);
+
+       if (tp->link_config.autoneg != AUTONEG_ENABLE) {
+               if (sg_dig_ctrl & (1 << 31)) {
+                       if (workaround) {
+                               u32 val = serdes_cfg;
+
+                               if (port_a)
+                                       val |= 0xc010000;
+                               else
+                                       val |= 0x4010000;
+                               tw32_f(MAC_SERDES_CFG, val);
+                       }
+                       tw32_f(SG_DIG_CTRL, 0x01388400);
+               }
+               if (mac_status & MAC_STATUS_PCS_SYNCED) {
+                       tg3_setup_flow_control(tp, 0, 0);
+                       current_link_up = 1;
                }
+               goto out;
+       }
 
-               tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
-               tw32_f(MAC_MODE, tp->mac_mode);
-               udelay(40);
+       /* Want auto-negotiation.  */
+       expected_sg_dig_ctrl = 0x81388400;
+
+       /* Pause capability */
+       expected_sg_dig_ctrl |= (1 << 11);
+
+       /* Asymmetric pause */
+       expected_sg_dig_ctrl |= (1 << 12);
+
+       if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+               if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
+                   tp->serdes_counter &&
+                   ((mac_status & (MAC_STATUS_PCS_SYNCED |
+                                   MAC_STATUS_RCVD_CFG)) ==
+                    MAC_STATUS_PCS_SYNCED)) {
+                       tp->serdes_counter--;
+                       current_link_up = 1;
+                       goto out;
+               }
+restart_autoneg:
+               if (workaround)
+                       tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+               tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
+               udelay(5);
+               tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+
+               tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+       } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+                                MAC_STATUS_SIGNAL_DET)) {
+               sg_dig_status = tr32(SG_DIG_STATUS);
+               mac_status = tr32(MAC_STATUS);
+
+               if ((sg_dig_status & (1 << 1)) &&
+                   (mac_status & MAC_STATUS_PCS_SYNCED)) {
+                       u32 local_adv, remote_adv;
+
+                       local_adv = ADVERTISE_PAUSE_CAP;
+                       remote_adv = 0;
+                       if (sg_dig_status & (1 << 19))
+                               remote_adv |= LPA_PAUSE_CAP;
+                       if (sg_dig_status & (1 << 20))
+                               remote_adv |= LPA_PAUSE_ASYM;
+
+                       tg3_setup_flow_control(tp, local_adv, remote_adv);
+                       current_link_up = 1;
+                       tp->serdes_counter = 0;
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               } else if (!(sg_dig_status & (1 << 1))) {
+                       if (tp->serdes_counter)
+                               tp->serdes_counter--;
+                       else {
+                               if (workaround) {
+                                       u32 val = serdes_cfg;
+
+                                       if (port_a)
+                                               val |= 0xc010000;
+                                       else
+                                               val |= 0x4010000;
+
+                                       tw32_f(MAC_SERDES_CFG, val);
+                               }
 
-               *flags = aninfo.flags;
+                               tw32_f(SG_DIG_CTRL, 0x01388400);
+                               udelay(40);
 
-               if (status == ANEG_DONE &&
-                   (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
-                                    MR_LP_ADV_FULL_DUPLEX)))
-                       res = 1;
+                               /* Link parallel detection - link is up
+                                * only if we have PCS_SYNC and not
+                                * receiving config code words.
+                                */
+                               mac_status = tr32(MAC_STATUS);
+                               if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
+                                   !(mac_status & MAC_STATUS_RCVD_CFG)) {
+                                       tg3_setup_flow_control(tp, 0, 0);
+                                       current_link_up = 1;
+                                       tp->tg3_flags2 |=
+                                               TG3_FLG2_PARALLEL_DETECT;
+                                       tp->serdes_counter =
+                                               SERDES_PARALLEL_DET_TIMEOUT;
+                               } else
+                                       goto restart_autoneg;
+                       }
+               }
+       } else {
+               tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }
 
-       return res;
+out:
+       return current_link_up;
+}
+
+static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+{
+       int current_link_up = 0;
+
+       if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
+               tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
+               goto out;
+       }
+
+       if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+               u32 flags;
+               int i;
+
+               if (fiber_autoneg(tp, &flags)) {
+                       u32 local_adv, remote_adv;
+
+                       local_adv = ADVERTISE_PAUSE_CAP;
+                       remote_adv = 0;
+                       if (flags & MR_LP_ADV_SYM_PAUSE)
+                               remote_adv |= LPA_PAUSE_CAP;
+                       if (flags & MR_LP_ADV_ASYM_PAUSE)
+                               remote_adv |= LPA_PAUSE_ASYM;
+
+                       tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+                       tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
+                       current_link_up = 1;
+               }
+               for (i = 0; i < 30; i++) {
+                       udelay(20);
+                       tw32_f(MAC_STATUS,
+                              (MAC_STATUS_SYNC_CHANGED |
+                               MAC_STATUS_CFG_CHANGED));
+                       udelay(40);
+                       if ((tr32(MAC_STATUS) &
+                            (MAC_STATUS_SYNC_CHANGED |
+                             MAC_STATUS_CFG_CHANGED)) == 0)
+                               break;
+               }
+
+               mac_status = tr32(MAC_STATUS);
+               if (current_link_up == 0 &&
+                   (mac_status & MAC_STATUS_PCS_SYNCED) &&
+                   !(mac_status & MAC_STATUS_RCVD_CFG))
+                       current_link_up = 1;
+       } else {
+               /* Forcing 1000FD link up. */
+               current_link_up = 1;
+               tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
+
+               tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+               udelay(40);
+       }
+
+out:
+       return current_link_up;
 }
 
 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
@@ -2028,6 +2645,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
+       u32 mac_status;
        int current_link_up;
        int i;
 
@@ -2037,118 +2655,43 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;
 
-       tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
-       tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
-       tw32_f(MAC_MODE, tp->mac_mode);
-       udelay(40);
-
-       if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) {
-               /* Allow time for the hardware to auto-negotiate (195ms) */
-               unsigned int tick = 0;
-
-               while (++tick < 195000) { 
-                       if (tr32(SG_DIG_STATUS) & SG_DIG_AUTONEG_COMPLETE)
-                               break;
-                       udelay(1);
+       if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
+           netif_carrier_ok(tp->dev) &&
+           (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
+               mac_status = tr32(MAC_STATUS);
+               mac_status &= (MAC_STATUS_PCS_SYNCED |
+                              MAC_STATUS_SIGNAL_DET |
+                              MAC_STATUS_CFG_CHANGED |
+                              MAC_STATUS_RCVD_CFG);
+               if (mac_status == (MAC_STATUS_PCS_SYNCED |
+                                  MAC_STATUS_SIGNAL_DET)) {
+                       tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+                                           MAC_STATUS_CFG_CHANGED));
+                       return 0;
                }
-               if (tick >= 195000)
-                       printk(KERN_INFO PFX "%s: HW autoneg failed !\n",
-                           tp->dev->name);
        }
 
-       /* Reset when initting first time or we have a link. */
-       if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
-           (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
-               /* Set PLL lock range. */
-               tg3_writephy(tp, 0x16, 0x8007);
-
-               /* SW reset */
-               tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+       tw32_f(MAC_TX_AUTO_NEG, 0);
 
-               /* Wait for reset to complete. */
-               /* XXX schedule_timeout() ... */
-               for (i = 0; i < 500; i++)
-                       udelay(10);
-
-               /* Config mode; select PMA/Ch 1 regs. */
-               tg3_writephy(tp, 0x10, 0x8411);
-
-               /* Enable auto-lock and comdet, select txclk for tx. */
-               tg3_writephy(tp, 0x11, 0x0a10);
-
-               tg3_writephy(tp, 0x18, 0x00a0);
-               tg3_writephy(tp, 0x16, 0x41ff);
-
-               /* Assert and deassert POR. */
-               tg3_writephy(tp, 0x13, 0x0400);
-               udelay(40);
-               tg3_writephy(tp, 0x13, 0x0000);
-
-               tg3_writephy(tp, 0x11, 0x0a50);
-               udelay(40);
-               tg3_writephy(tp, 0x11, 0x0a10);
-
-               /* Wait for signal to stabilize */
-               /* XXX schedule_timeout() ... */
-               for (i = 0; i < 15000; i++)
-                       udelay(10);
+       tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+       tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+       tw32_f(MAC_MODE, tp->mac_mode);
+       udelay(40);
 
-               /* Deselect the channel register so we can read the PHYID
-                * later.
-                */
-               tg3_writephy(tp, 0x10, 0x8011);
-       }
+       if (tp->phy_id == PHY_ID_BCM8002)
+               tg3_init_bcm8002(tp);
 
-       /* Enable link change interrupt unless serdes polling.  */
-       if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
-               tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
-       else
-               tw32_f(MAC_EVENT, 0);
+       /* Enable link change event even when serdes polling.  */
+       tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);
 
        current_link_up = 0;
-       if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
-               if (tp->link_config.autoneg == AUTONEG_ENABLE) {
-                       u32 flags;
-  
-                       if (fiber_autoneg(tp, &flags)) {
-                               u32 local_adv, remote_adv;
-
-                               local_adv = ADVERTISE_PAUSE_CAP;
-                               remote_adv = 0;
-                               if (flags & MR_LP_ADV_SYM_PAUSE)
-                                       remote_adv |= LPA_PAUSE_CAP;
-                               if (flags & MR_LP_ADV_ASYM_PAUSE)
-                                       remote_adv |= LPA_PAUSE_ASYM;
-
-                               tg3_setup_flow_control(tp, local_adv, remote_adv);
-
-                               tp->tg3_flags |=
-                                       TG3_FLAG_GOT_SERDES_FLOWCTL;
-                               current_link_up = 1;
-                       }
-                       for (i = 0; i < 60; i++) {
-                               udelay(20);
-                               tw32_f(MAC_STATUS,
-                                    (MAC_STATUS_SYNC_CHANGED |
-                                     MAC_STATUS_CFG_CHANGED));
-                               udelay(40);
-                               if ((tr32(MAC_STATUS) &
-                                    (MAC_STATUS_SYNC_CHANGED |
-                                     MAC_STATUS_CFG_CHANGED)) == 0)
-                                       break;
-                       }
-                       if (current_link_up == 0 &&
-                           (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
-                               current_link_up = 1;
-                       }
-               } else {
-                       /* Forcing 1000FD link up. */
-                       current_link_up = 1;
-                       tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
-               }
-       } else
-               tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
+       mac_status = tr32(MAC_STATUS);
+
+       if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
+               current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
+       else
+               current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
 
        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        tw32_f(MAC_MODE, tp->mac_mode);
@@ -2159,19 +2702,26 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
 
        for (i = 0; i < 100; i++) {
-               udelay(20);
-               tw32_f(MAC_STATUS,
-                    (MAC_STATUS_SYNC_CHANGED |
-                     MAC_STATUS_CFG_CHANGED));
-               udelay(40);
-               if ((tr32(MAC_STATUS) &
-                    (MAC_STATUS_SYNC_CHANGED |
-                     MAC_STATUS_CFG_CHANGED)) == 0)
+               tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+                                   MAC_STATUS_CFG_CHANGED));
+               udelay(5);
+               if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+                                        MAC_STATUS_CFG_CHANGED |
+                                        MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }
 
-       if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
+       mac_status = tr32(MAC_STATUS);
+       if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
+               if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+                   tp->serdes_counter == 0) {
+                       tw32_f(MAC_MODE, (tp->mac_mode |
+                                         MAC_MODE_SEND_CONFIGS));
+                       udelay(1);
+                       tw32_f(MAC_MODE, tp->mac_mode);
+               }
+       }
 
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
@@ -2203,103 +2753,364 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
                        tg3_link_report(tp);
        }
 
-       if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
-               tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
-               udelay(40);
-               if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
-                       tw32_f(MAC_MODE, tp->mac_mode);
-                       udelay(40);
-               }
-       }
-
        return 0;
 }
 
-static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 {
-       int err;
+       int current_link_up, err = 0;
+       u32 bmsr, bmcr;
+       u16 current_speed;
+       u8 current_duplex;
 
-       if (tp->phy_id == PHY_ID_SERDES) {
-               err = tg3_setup_fiber_phy(tp, force_reset);
-       } else {
-               err = tg3_setup_copper_phy(tp, force_reset);
-       }
+       tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+       tw32_f(MAC_MODE, tp->mac_mode);
+       udelay(40);
 
-       if (tp->link_config.active_speed == SPEED_1000 &&
-           tp->link_config.active_duplex == DUPLEX_HALF)
-               tw32(MAC_TX_LENGTHS,
-                    ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
-                     (6 << TX_LENGTHS_IPG_SHIFT) |
-                     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
-       else
-               tw32(MAC_TX_LENGTHS,
-                    ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
-                     (6 << TX_LENGTHS_IPG_SHIFT) |
-                     (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+       tw32(MAC_EVENT, 0);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
-               if (netif_carrier_ok(tp->dev)) {
-                       tw32(HOSTCC_STAT_COAL_TICKS,
-                            DEFAULT_STAT_COAL_TICKS);
-               } else {
-                       tw32(HOSTCC_STAT_COAL_TICKS, 0);
-               }
-       }
+       tw32_f(MAC_STATUS,
+            (MAC_STATUS_SYNC_CHANGED |
+             MAC_STATUS_CFG_CHANGED |
+             MAC_STATUS_MI_COMPLETION |
+             MAC_STATUS_LNKSTATE_CHANGED));
+       udelay(40);
 
-       return err;
-}
+       if (force_reset)
+               tg3_phy_reset(tp);
 
-/* Tigon3 never reports partial packet sends.  So we do not
- * need special logic to handle SKBs that have not had all
- * of their frags sent yet, like SunGEM does.
- */
-static void tg3_tx(struct tg3 *tp)
-{
-       u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
-       u32 sw_idx = tp->tx_cons;
+       current_link_up = 0;
+       current_speed = SPEED_INVALID;
+       current_duplex = DUPLEX_INVALID;
 
-       while (sw_idx != hw_idx) {
-               struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
-               struct sk_buff *skb = ri->skb;
-               int i;
+       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+               if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+                       bmsr |= BMSR_LSTATUS;
+               else
+                       bmsr &= ~BMSR_LSTATUS;
+       }
 
-               if (unlikely(skb == NULL))
-                       BUG();
+       err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 
-               pci_unmap_single(tp->pdev,
-                                pci_unmap_addr(ri, mapping),
-                                skb_headlen(skb),
-                                PCI_DMA_TODEVICE);
+       if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
+           (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+               /* do nothing, just check for link up at the end */
+       } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+               u32 adv, new_adv;
 
-               ri->skb = NULL;
+               err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+               new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+                                 ADVERTISE_1000XPAUSE |
+                                 ADVERTISE_1000XPSE_ASYM |
+                                 ADVERTISE_SLCT);
 
-               sw_idx = NEXT_TX(sw_idx);
+               /* Always advertise symmetric PAUSE just like copper */
+               new_adv |= ADVERTISE_1000XPAUSE;
 
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       if (unlikely(sw_idx == hw_idx))
-                               BUG();
+               if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
+                       new_adv |= ADVERTISE_1000XHALF;
+               if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
+                       new_adv |= ADVERTISE_1000XFULL;
 
-                       ri = &tp->tx_buffers[sw_idx];
-                       if (unlikely(ri->skb != NULL))
-                               BUG();
+               if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
+                       tg3_writephy(tp, MII_ADVERTISE, new_adv);
+                       bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
+                       tg3_writephy(tp, MII_BMCR, bmcr);
 
-                       pci_unmap_page(tp->pdev,
-                                      pci_unmap_addr(ri, mapping),
-                                      skb_shinfo(skb)->frags[i].size,
-                                      PCI_DMA_TODEVICE);
+                       tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+                       tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
 
-                       sw_idx = NEXT_TX(sw_idx);
+                       return err;
                }
+       } else {
+               u32 new_bmcr;
 
-               dev_kfree_skb_irq(skb);
-       }
+               bmcr &= ~BMCR_SPEED1000;
+               new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
 
-       tp->tx_cons = sw_idx;
+               if (tp->link_config.duplex == DUPLEX_FULL)
+                       new_bmcr |= BMCR_FULLDPLX;
 
-       if (netif_queue_stopped(tp->dev) &&
-           (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
-               netif_wake_queue(tp->dev);
+               if (new_bmcr != bmcr) {
+                       /* BMCR_SPEED1000 is a reserved bit that needs
+                        * to be set on write.
+                        */
+                       new_bmcr |= BMCR_SPEED1000;
+
+                       /* Force a linkdown */
+                       if (netif_carrier_ok(tp->dev)) {
+                               u32 adv;
+
+                               err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+                               adv &= ~(ADVERTISE_1000XFULL |
+                                        ADVERTISE_1000XHALF |
+                                        ADVERTISE_SLCT);
+                               tg3_writephy(tp, MII_ADVERTISE, adv);
+                               tg3_writephy(tp, MII_BMCR, bmcr |
+                                                          BMCR_ANRESTART |
+                                                          BMCR_ANENABLE);
+                               udelay(10);
+                               netif_carrier_off(tp->dev);
+                       }
+                       tg3_writephy(tp, MII_BMCR, new_bmcr);
+                       bmcr = new_bmcr;
+                       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+                       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                           ASIC_REV_5714) {
+                               if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+                                       bmsr |= BMSR_LSTATUS;
+                               else
+                                       bmsr &= ~BMSR_LSTATUS;
+                       }
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               }
+       }
+
+       if (bmsr & BMSR_LSTATUS) {
+               current_speed = SPEED_1000;
+               current_link_up = 1;
+               if (bmcr & BMCR_FULLDPLX)
+                       current_duplex = DUPLEX_FULL;
+               else
+                       current_duplex = DUPLEX_HALF;
+
+               if (bmcr & BMCR_ANENABLE) {
+                       u32 local_adv, remote_adv, common;
+
+                       err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
+                       err |= tg3_readphy(tp, MII_LPA, &remote_adv);
+                       common = local_adv & remote_adv;
+                       if (common & (ADVERTISE_1000XHALF |
+                                     ADVERTISE_1000XFULL)) {
+                               if (common & ADVERTISE_1000XFULL)
+                                       current_duplex = DUPLEX_FULL;
+                               else
+                                       current_duplex = DUPLEX_HALF;
+
+                               tg3_setup_flow_control(tp, local_adv,
+                                                      remote_adv);
+                       } else
+                               current_link_up = 0;
+               }
+       }
+
+       tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+       if (tp->link_config.active_duplex == DUPLEX_HALF)
+               tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+       tw32_f(MAC_MODE, tp->mac_mode);
+       udelay(40);
+
+       tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+
+       tp->link_config.active_speed = current_speed;
+       tp->link_config.active_duplex = current_duplex;
+
+       if (current_link_up != netif_carrier_ok(tp->dev)) {
+               if (current_link_up)
+                       netif_carrier_on(tp->dev);
+               else {
+                       netif_carrier_off(tp->dev);
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               }
+               tg3_link_report(tp);
+       }
+       return err;
+}
+
+static void tg3_serdes_parallel_detect(struct tg3 *tp)
+{
+       if (tp->serdes_counter) {
+               /* Give autoneg time to complete. */
+               tp->serdes_counter--;
+               return;
+       }
+       if (!netif_carrier_ok(tp->dev) &&
+           (tp->link_config.autoneg == AUTONEG_ENABLE)) {
+               u32 bmcr;
+
+               tg3_readphy(tp, MII_BMCR, &bmcr);
+               if (bmcr & BMCR_ANENABLE) {
+                       u32 phy1, phy2;
+
+                       /* Select shadow register 0x1f */
+                       tg3_writephy(tp, 0x1c, 0x7c00);
+                       tg3_readphy(tp, 0x1c, &phy1);
+
+                       /* Select expansion interrupt status register */
+                       tg3_writephy(tp, 0x17, 0x0f01);
+                       tg3_readphy(tp, 0x15, &phy2);
+                       tg3_readphy(tp, 0x15, &phy2);
+
+                       if ((phy1 & 0x10) && !(phy2 & 0x20)) {
+                               /* We have signal detect and not receiving
+                                * config code words, link is up by parallel
+                                * detection.
+                                */
+
+                               bmcr &= ~BMCR_ANENABLE;
+                               bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+                               tg3_writephy(tp, MII_BMCR, bmcr);
+                               tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
+                       }
+               }
+       } else if (netif_carrier_ok(tp->dev) &&
+                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+               u32 phy2;
+
+               /* Select expansion interrupt status register */
+               tg3_writephy(tp, 0x17, 0x0f01);
+               tg3_readphy(tp, 0x15, &phy2);
+               if (phy2 & 0x20) {
+                       u32 bmcr;
+
+                       /* Config code words received, turn on autoneg. */
+                       tg3_readphy(tp, MII_BMCR, &bmcr);
+                       tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
+
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               }
+       }
+}
+
+static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+{
+       int err;
+
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+               err = tg3_setup_fiber_phy(tp, force_reset);
+       } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+               err = tg3_setup_fiber_mii_phy(tp, force_reset);
+       } else {
+               err = tg3_setup_copper_phy(tp, force_reset);
+       }
+
+       if (tp->link_config.active_speed == SPEED_1000 &&
+           tp->link_config.active_duplex == DUPLEX_HALF)
+               tw32(MAC_TX_LENGTHS,
+                    ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+                     (6 << TX_LENGTHS_IPG_SHIFT) |
+                     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+       else
+               tw32(MAC_TX_LENGTHS,
+                    ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+                     (6 << TX_LENGTHS_IPG_SHIFT) |
+                     (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+               if (netif_carrier_ok(tp->dev)) {
+                       tw32(HOSTCC_STAT_COAL_TICKS,
+                            tp->coal.stats_block_coalesce_usecs);
+               } else {
+                       tw32(HOSTCC_STAT_COAL_TICKS, 0);
+               }
+       }
+
+       return err;
+}
+
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+       BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
+              tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+       printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
+              "mapped I/O cycles to the network device, attempting to "
+              "recover. Please report the problem to the driver maintainer "
+              "and include system chipset information.\n", tp->dev->name);
+
+       spin_lock(&tp->lock);
+       tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
+       spin_unlock(&tp->lock);
+}
+
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+       smp_mb();
+       return (tp->tx_pending -
+               ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
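
A quick standalone worked example (not part of the patch) of the ring arithmetic above, assuming the driver's 512-entry TX ring:

#include <stdio.h>
#include <stdint.h>

#define TG3_TX_RING_SIZE 512	/* power of two, so "& (size - 1)" wraps the index */

static uint32_t tx_avail(uint32_t tx_pending, uint32_t tx_prod, uint32_t tx_cons)
{
	/* (tx_prod - tx_cons) wraps correctly in unsigned arithmetic. */
	return tx_pending - ((tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1));
}

int main(void)
{
	/* Producer has wrapped past the ring end: 7 descriptors are in
	 * flight, so with tx_pending = 511 there are 504 free.
	 */
	printf("%u\n", tx_avail(511, 5, 510));	/* prints 504 */
	return 0;
}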
+
+/* Tigon3 never reports partial packet sends.  So we do not
+ * need special logic to handle SKBs that have not had all
+ * of their frags sent yet, like SunGEM does.
+ */
+static void tg3_tx(struct tg3 *tp)
+{
+       u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
+       u32 sw_idx = tp->tx_cons;
+
+       while (sw_idx != hw_idx) {
+               struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+               struct sk_buff *skb = ri->skb;
+               int i, tx_bug = 0;
+
+               if (unlikely(skb == NULL)) {
+                       tg3_tx_recover(tp);
+                       return;
+               }
+
+               pci_unmap_single(tp->pdev,
+                                pci_unmap_addr(ri, mapping),
+                                skb_headlen(skb),
+                                PCI_DMA_TODEVICE);
+
+               ri->skb = NULL;
+
+               sw_idx = NEXT_TX(sw_idx);
+
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       ri = &tp->tx_buffers[sw_idx];
+                       if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+                               tx_bug = 1;
+
+                       pci_unmap_page(tp->pdev,
+                                      pci_unmap_addr(ri, mapping),
+                                      skb_shinfo(skb)->frags[i].size,
+                                      PCI_DMA_TODEVICE);
+
+                       sw_idx = NEXT_TX(sw_idx);
+               }
+
+               dev_kfree_skb(skb);
+
+               if (unlikely(tx_bug)) {
+                       tg3_tx_recover(tp);
+                       return;
+               }
+       }
+
+       tp->tx_cons = sw_idx;
+
+       /* Need to make the tx_cons update visible to tg3_start_xmit()
+        * before checking for netif_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that tg3_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
+
+       if (unlikely(netif_queue_stopped(tp->dev) &&
+                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
+               netif_tx_lock(tp->dev);
+               if (netif_queue_stopped(tp->dev) &&
+                   (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
+                       netif_wake_queue(tp->dev);
+               netif_tx_unlock(tp->dev);
+       }
 }
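
The completion loop above advances sw_idx with NEXT_TX(); a tiny standalone illustration (not part of the patch) of how the power-of-two mask stands in for a modulo:

#include <stdio.h>

#define TG3_TX_RING_SIZE 512
#define NEXT_TX(n)	(((n) + 1) & (TG3_TX_RING_SIZE - 1))

int main(void)
{
	/* The mask wraps the index at the end of the ring: 511 -> 0. */
	printf("%d %d\n", NEXT_TX(510), NEXT_TX(511));	/* prints "511 0" */
	return 0;
}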
 
 /* Returns size of skb allocated or < 0 on error.
@@ -2330,7 +3141,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
                map = &tp->rx_std_buffers[dest_idx];
                if (src_idx >= 0)
                        src_map = &tp->rx_std_buffers[src_idx];
-               skb_size = RX_PKT_BUF_SZ;
+               skb_size = tp->rx_pkt_buf_sz;
                break;
 
        case RXD_OPAQUE_RING_JUMBO:
@@ -2352,11 +3163,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
-       skb = dev_alloc_skb(skb_size);
+       skb = netdev_alloc_skb(tp->dev, skb_size);
        if (skb == NULL)
                return -ENOMEM;
 
-       skb->dev = tp->dev;
        skb_reserve(skb, tp->rx_offset);
 
        mapping = pci_map_single(tp->pdev, skb->data,
@@ -2449,9 +3259,9 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
  */
 static int tg3_rx(struct tg3 *tp, int budget)
 {
-       u32 work_mask;
-       u32 rx_rcb_ptr = tp->rx_rcb_ptr;
-       u16 hw_idx, sw_idx;
+       u32 work_mask, rx_std_posted = 0;
+       u32 sw_idx = tp->rx_rcb_ptr;
+       u16 hw_idx;
        int received;
 
        hw_idx = tp->hw_status->idx[0].rx_producer;
@@ -2460,7 +3270,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
         * the opaque cookie.
         */
        rmb();
-       sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
@@ -2477,6 +3286,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
+                       rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
@@ -2502,7 +3312,11 @@ static int tg3_rx(struct tg3 *tp, int budget)
 
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
 
-               if (len > RX_COPY_THRESHOLD) {
+               /* rx_offset != 2 iff this is a 5701 card running
+                * in PCI-X mode [see tg3_get_invariants()].
+                */
+               if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2) {
                        int skb_size;
 
                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
@@ -2521,11 +3335,10 @@ static int tg3_rx(struct tg3 *tp, int budget)
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
 
-                       copy_skb = dev_alloc_skb(len + 2);
+                       copy_skb = netdev_alloc_skb(tp->dev, len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;
 
-                       copy_skb->dev = tp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -2560,15 +3373,29 @@ static int tg3_rx(struct tg3 *tp, int budget)
 
 next_pkt:
                (*post_ptr)++;
+
+               if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+                       u32 idx = *post_ptr % TG3_RX_RING_SIZE;
+
+                       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
+                                    TG3_64BIT_REG_LOW, idx);
+                       work_mask &= ~RXD_OPAQUE_RING_STD;
+                       rx_std_posted = 0;
+               }
 next_pkt_nopost:
-               rx_rcb_ptr++;
-               sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
+               sw_idx++;
+               sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
+
+               /* Refresh hw_idx to see if there is new work */
+               if (sw_idx == hw_idx) {
+                       hw_idx = tp->hw_status->idx[0].rx_producer;
+                       rmb();
+               }
        }
 
        /* ACK the status ring. */
-       tp->rx_rcb_ptr = rx_rcb_ptr;
-       tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
-                    (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
+       tp->rx_rcb_ptr = sw_idx;
+       tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
 
        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
@@ -2581,6 +3408,7 @@ next_pkt_nopost:
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
+       mmiowb();
 
        return received;
 }
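
For context, a hedged standalone model (not driver code; names invented) of the consumer indexing tg3_rx() now uses: advance sw_idx modulo the ring size and, when the consumer catches up, re-read the producer index so freshly posted work is handled without waiting for another interrupt:

#include <stdint.h>

struct rx_ring_model {
	volatile uint32_t producer;	/* advanced by the "hardware" */
	uint32_t consumer;		/* our sw_idx */
	uint32_t size;			/* number of ring entries */
};

int ring_consume(struct rx_ring_model *r, int budget,
		 void (*handle)(uint32_t idx))
{
	uint32_t hw = r->producer;
	int received = 0;

	while (r->consumer != hw && budget > 0) {
		handle(r->consumer);
		r->consumer = (r->consumer + 1) % r->size;
		received++;
		budget--;

		/* Refresh the producer index to see if there is new work. */
		if (r->consumer == hw)
			hw = r->producer;
	}
	return received;
}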
@@ -2589,11 +3417,8 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 {
        struct tg3 *tp = netdev_priv(netdev);
        struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned long flags;
        int done;
 
-       spin_lock_irqsave(&tp->lock, flags);
-
        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
@@ -2601,24 +3426,26 @@ static int tg3_poll(struct net_device *netdev, int *budget)
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
+                       spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
+                       spin_unlock(&tp->lock);
                }
        }
 
        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-               spin_lock(&tp->tx_lock);
                tg3_tx(tp);
-               spin_unlock(&tp->tx_lock);
+               if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
+                       netif_rx_complete(netdev);
+                       schedule_work(&tp->reset_task);
+                       return 0;
+               }
        }
 
-       spin_unlock_irqrestore(&tp->lock, flags);
-
        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with dev->poll()
         */
-       done = 1;
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
                int orig_budget = *budget;
                int work_done;
@@ -2630,158 +3457,343 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
                *budget -= work_done;
                netdev->quota -= work_done;
-
-               if (work_done >= orig_budget)
-                       done = 0;
        }
 
+       if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+               tp->last_tag = sblk->status_tag;
+               rmb();
+       } else
+               sblk->status &= ~SD_STATUS_UPDATED;
+
        /* if no more work, tell net stack and NIC we're done */
+       done = !tg3_has_work(tp);
        if (done) {
-               spin_lock_irqsave(&tp->lock, flags);
-               __netif_rx_complete(netdev);
-               tg3_enable_ints(tp);
-               spin_unlock_irqrestore(&tp->lock, flags);
+               netif_rx_complete(netdev);
+               tg3_restart_ints(tp);
        }
 
        return (done ? 0 : 1);
 }
 
-static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
+static void tg3_irq_quiesce(struct tg3 *tp)
 {
-       struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned int work_exists = 0;
+       BUG_ON(tp->irq_sync);
 
-       /* check for phy events */
-       if (!(tp->tg3_flags &
-             (TG3_FLAG_USE_LINKCHG_REG |
-              TG3_FLAG_POLL_SERDES))) {
-               if (sblk->status & SD_STATUS_LINK_CHG)
-                       work_exists = 1;
-       }
-       /* check for RX/TX work to do */
-       if (sblk->idx[0].tx_consumer != tp->tx_cons ||
-           sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-               work_exists = 1;
+       tp->irq_sync = 1;
+       smp_mb();
 
-       return work_exists;
+       synchronize_irq(tp->pdev->irq);
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+       return tp->irq_sync;
+}
+
+/* Fully shut down all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, the IRQ handler must be quiesced and
+ * synchronized with as well.  Most of the time this is only needed
+ * when shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+       if (irq_sync)
+               tg3_irq_quiesce(tp);
+       spin_lock_bh(&tp->lock);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+       spin_unlock_bh(&tp->lock);
+}
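A small userspace model of the irq_sync handshake introduced above: the quiesce path publishes a flag before waiting out in-flight handlers, and the handler does no work once it observes the flag. The struct and function names below are illustrative, not kernel APIs.

#include <stdio.h>

struct fake_tp {
	int irq_sync;
};

/* Mirrors the early tg3_irq_sync() check done by the interrupt handlers. */
static int fake_isr(const struct fake_tp *tp)
{
	if (tp->irq_sync)
		return 0;	/* device is quiescing: do no work */
	return 1;		/* normal path: schedule the NAPI poll */
}

int main(void)
{
	struct fake_tp tp = { .irq_sync = 0 };

	printf("before quiesce: handler does work = %d\n", fake_isr(&tp));

	/* tg3_irq_quiesce() sets the flag, issues smp_mb(), then calls
	 * synchronize_irq() so any handler already running finishes first.
	 */
	tp.irq_sync = 1;
	printf("after quiesce:  handler does work = %d\n", fake_isr(&tp));
	return 0;
}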
+
+/* One-shot MSI handler - Chip automatically disables interrupt
+ * after sending MSI so driver doesn't have to do it.
+ */
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+
+       prefetch(tp->hw_status);
+       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+
+       if (likely(!tg3_irq_sync(tp)))
+               netif_rx_schedule(dev);         /* schedule NAPI poll */
+
+       return IRQ_HANDLED;
 }
 
-static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+/* MSI ISR - No need to check for interrupt sharing and no need to
+ * flush status block and interrupt mailbox. PCI ordering rules
+ * guarantee that MSI will arrive after the status block.
+ */
+static irqreturn_t tg3_msi(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+
+       prefetch(tp->hw_status);
+       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+       /*
+        * Writing any value to intr-mbox-0 clears PCI INTA# and
+        * chip-internal interrupt pending events.
+        * Writing non-zero to intr-mbox-0 additionally tells the
+        * NIC to stop sending us irqs, engaging "in-intr-handler"
+        * event coalescing.
+        */
+       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+       if (likely(!tg3_irq_sync(tp)))
+               netif_rx_schedule(dev);         /* schedule NAPI poll */
+
+       return IRQ_RETVAL(1);
+}
+
+static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned long flags;
        unsigned int handled = 1;
 
-       spin_lock_irqsave(&tp->lock, flags);
-
-       if (sblk->status & SD_STATUS_UPDATED) {
+       /* In INTx mode, it is possible for the interrupt to arrive at
+        * the CPU before the status block posted prior to it reaches memory.
+        * Reading the PCI State register will confirm whether the
+        * interrupt is ours and will flush the status block.
+        */
+       if ((sblk->status & SD_STATUS_UPDATED) ||
+           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
-                * writing any value to intr-mbox-0 clears PCI INTA# and
+                * Writing any value to intr-mbox-0 clears PCI INTA# and
                 * chip-internal interrupt pending events.
-                * writing non-zero to intr-mbox-0 additional tells the
+                * Writing non-zero to intr-mbox-0 additionally tells the
                 * NIC to stop sending us irqs, engaging "in-intr-handler"
                 * event coalescing.
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
-               /*
-                * Flush PCI write.  This also guarantees that our
-                * status block has been flushed to host memory.
-                */
-               tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+               if (tg3_irq_sync(tp))
+                       goto out;
                sblk->status &= ~SD_STATUS_UPDATED;
-
-               if (likely(tg3_has_work(dev, tp)))
+               if (likely(tg3_has_work(tp))) {
+                       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                        netif_rx_schedule(dev);         /* schedule NAPI poll */
-               else {
-                       /* no work, shared interrupt perhaps?  re-enable
+               } else {
+                       /* No work, shared interrupt perhaps?  re-enable
                         * interrupts, and flush that PCI write
                         */
-                       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                                0x00000000);
-                       tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
                }
        } else {        /* shared interrupt */
                handled = 0;
        }
+out:
+       return IRQ_RETVAL(handled);
+}
 
-       spin_unlock_irqrestore(&tp->lock, flags);
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+       struct tg3_hw_status *sblk = tp->hw_status;
+       unsigned int handled = 1;
 
+       /* In INTx mode, it is possible for the interrupt to arrive at
+        * the CPU before the status block posted prior to it reaches memory.
+        * Reading the PCI State register will confirm whether the
+        * interrupt is ours and will flush the status block.
+        */
+       if ((sblk->status_tag != tp->last_tag) ||
+           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+               /*
+                * writing any value to intr-mbox-0 clears PCI INTA# and
+                * chip-internal interrupt pending events.
+        * writing non-zero to intr-mbox-0 additionally tells the
+                * NIC to stop sending us irqs, engaging "in-intr-handler"
+                * event coalescing.
+                */
+               tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                            0x00000001);
+               if (tg3_irq_sync(tp))
+                       goto out;
+               if (netif_rx_schedule_prep(dev)) {
+                       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+                       /* Update last_tag to mark that this status has been
+                        * seen. Because interrupt may be shared, we may be
+                        * racing with tg3_poll(), so only update last_tag
+                        * if tg3_poll() is not scheduled.
+                        */
+                       tp->last_tag = sblk->status_tag;
+                       __netif_rx_schedule(dev);
+               }
+       } else {        /* shared interrupt */
+               handled = 0;
+       }
+out:
        return IRQ_RETVAL(handled);
 }
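The tagged-status path decides whether an interrupt carries new work by comparing the status tag against the last tag the driver recorded; a stale or shared interrupt leaves the tag unchanged. A standalone sketch of that comparison, with invented tag values:

#include <stdio.h>

int main(void)
{
	unsigned int last_tag = 7;
	unsigned int tags[] = { 7, 8, 8, 9 };	/* tags seen on successive IRQs */

	for (unsigned int i = 0; i < sizeof(tags) / sizeof(tags[0]); i++) {
		if (tags[i] != last_tag) {
			printf("irq %u: new work, tag %u\n", i, tags[i]);
			last_tag = tags[i];	/* like the tp->last_tag update */
		} else {
			printf("irq %u: stale or shared interrupt\n", i);
		}
	}
	return 0;
}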
 
-static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *);
+/* ISR for interrupt test */
+static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+       struct tg3_hw_status *sblk = tp->hw_status;
+
+       if ((sblk->status & SD_STATUS_UPDATED) ||
+           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+               tg3_disable_ints(tp);
+               return IRQ_RETVAL(1);
+       }
+       return IRQ_RETVAL(0);
+}
+
+static int tg3_init_hw(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
+
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+{
+       int err;
+
+       err = tg3_init_hw(tp, reset_phy);
+       if (err) {
+               printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
+                      "aborting.\n", tp->dev->name);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               tg3_full_unlock(tp);
+               del_timer_sync(&tp->timer);
+               tp->irq_sync = 0;
+               netif_poll_enable(tp->dev);
+               dev_close(tp->dev);
+               tg3_full_lock(tp, 0);
+       }
+       return err;
+}
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
 {
-       tg3_interrupt(dev->irq, dev, NULL);
+       struct tg3 *tp = netdev_priv(dev);
+
+       tg3_interrupt(tp->pdev->irq, dev);
 }
 #endif
 
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
 {
-       struct tg3 *tp = _data;
+       struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;
 
+       tg3_full_lock(tp, 0);
+       tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
+
+       if (!netif_running(tp->dev)) {
+               tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
+               tg3_full_unlock(tp);
+               return;
+       }
+
+       tg3_full_unlock(tp);
+
        tg3_netif_stop(tp);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
 
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
-       tg3_halt(tp);
-       tg3_init_hw(tp);
+       if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
+               tp->write32_tx_mbox = tg3_write32_tx_mbox;
+               tp->write32_rx_mbox = tg3_write_flush_reg32;
+               tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+               tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
+       }
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+       if (tg3_init_hw(tp, 1))
+               goto out;
 
        tg3_netif_start(tp);
 
        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);
+
+out:
+       tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
+
+       tg3_full_unlock(tp);
 }
 
 static void tg3_tx_timeout(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
-              dev->name);
+       if (netif_msg_tx_err(tp))
+               printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
+                      dev->name);
 
        schedule_work(&tp->reset_task);
 }
 
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+       u32 base = (u32) mapping & 0xffffffff;
+
+       return ((base > 0xffffdcc0) &&
+               (base + len + 8 < base));
+}
+
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+                                         int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+       if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
+               return (((u64) mapping + len) > DMA_40BIT_MASK);
+       return 0;
+#else
+       return 0;
+#endif
+}
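A standalone check of the boundary test above with made-up bus addresses: the 4GB test fires when the low 32 bits of the mapping are high enough that adding the length (plus 8) wraps, i.e. the buffer straddles a 4GB line.

#include <stdio.h>
#include <stdint.h>

static int overflows_4g(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)(mapping & 0xffffffff);

	return (base > 0xffffdcc0) && ((uint32_t)(base + len + 8) < base);
}

int main(void)
{
	/* 0x1fffff000 + 0x2000 crosses the 8GB line: test fires. */
	printf("%d\n", overflows_4g(0x1fffff000ULL, 0x2000));
	/* 0x100001000 + 0x2000 stays inside its 4GB window: test is quiet. */
	printf("%d\n", overflows_4g(0x100001000ULL, 0x2000));
	return 0;
}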
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-                                      u32 guilty_entry, int guilty_len,
-                                      u32 last_plus_one, u32 *start, u32 mss)
+/* Work around 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+                                      u32 last_plus_one, u32 *start,
+                                      u32 base_flags, u32 mss)
 {
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
-       dma_addr_t new_addr;
+       dma_addr_t new_addr = 0;
        u32 entry = *start;
-       int i;
+       int i, ret = 0;
 
        if (!new_skb) {
-               dev_kfree_skb(skb);
-               return -1;
+               ret = -1;
+       } else {
+               /* New SKB is guaranteed to be linear. */
+               entry = *start;
+               new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+                                         PCI_DMA_TODEVICE);
+               /* Make sure new skb does not cross any 4G boundaries.
+                * Drop the packet if it does.
+                */
+               if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+                       ret = -1;
+                       dev_kfree_skb(new_skb);
+                       new_skb = NULL;
+               } else {
+                       tg3_set_txd(tp, entry, new_addr, new_skb->len,
+                                   base_flags, 1 | (mss << 1));
+                       *start = NEXT_TX(entry);
+               }
        }
 
-       /* New SKB is guaranteed to be linear. */
-       entry = *start;
-       new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-                                 PCI_DMA_TODEVICE);
-       tg3_set_txd(tp, entry, new_addr, new_skb->len,
-                   (skb->ip_summed == CHECKSUM_HW) ?
-                   TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
-       *start = NEXT_TX(entry);
-
        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
@@ -2801,17 +3813,19 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
+               i++;
        }
 
        dev_kfree_skb(skb);
 
-       return 0;
+       return ret;
 }
 
 static void tg3_set_txd(struct tg3 *tp, int entry,
                        dma_addr_t mapping, int len, u32 flags,
                        u32 mss_and_is_end)
 {
+       struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
        int is_end = (mss_and_is_end & 0x1);
        u32 mss = (mss_and_is_end >> 1);
        u32 vlan_tag = 0;
@@ -2823,109 +3837,243 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
                flags &= 0xffff;
        }
        vlan_tag |= (mss << TXD_MSS_SHIFT);
-       if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
-               struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
 
-               txd->addr_hi = ((u64) mapping >> 32);
-               txd->addr_lo = ((u64) mapping & 0xffffffff);
-               txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
-               txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
-       } else {
-               struct tx_ring_info *txr = &tp->tx_buffers[entry];
-               unsigned long txd;
+       txd->addr_hi = ((u64) mapping >> 32);
+       txd->addr_lo = ((u64) mapping & 0xffffffff);
+       txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
+       txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+}
 
-               txd = (tp->regs +
-                      NIC_SRAM_WIN_BASE +
-                      NIC_SRAM_TX_BUFFER_DESC);
-               txd += (entry * TXD_SIZE);
+/* hard_start_xmit for devices that don't have any bugs and
+ * support TG3_FLG2_HW_TSO_2 only.
+ */
+static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       dma_addr_t mapping;
+       u32 len, entry, base_flags, mss;
+
+       len = skb_headlen(skb);
+
+       /* We are running in BH disabled context with netif_tx_lock
+        * and TX reclaim runs via tp->poll inside of a software
+        * interrupt.  Furthermore, IRQ processing runs lockless so we have
+        * no IRQ context deadlocks to worry about either.  Rejoice!
+        */
+       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+
+                       /* This is a hard error, log it. */
+                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+                              "queue awake!\n", dev->name);
+               }
+               return NETDEV_TX_BUSY;
+       }
+
+       entry = tp->tx_prod;
+       base_flags = 0;
+#if TG3_TSO_SUPPORT != 0
+       mss = 0;
+       if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
+           (mss = skb_shinfo(skb)->gso_size) != 0) {
+               int tcp_opt_len, ip_tcp_len;
+
+               if (skb_header_cloned(skb) &&
+                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+                       dev_kfree_skb(skb);
+                       goto out_unlock;
+               }
 
-               /* Save some PIOs */
-               if (sizeof(dma_addr_t) != sizeof(u32))
-                       writel(((u64) mapping >> 32),
-                              txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                       mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
+               else {
+                       tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+                       ip_tcp_len = (skb->nh.iph->ihl * 4) +
+                                    sizeof(struct tcphdr);
+
+                       skb->nh.iph->check = 0;
+                       skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
+                                                    tcp_opt_len);
+                       mss |= (ip_tcp_len + tcp_opt_len) << 9;
+               }
+
+               base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+                              TXD_FLAG_CPU_POST_DMA);
+
+               skb->h.th->check = 0;
+
+       }
+       else if (skb->ip_summed == CHECKSUM_PARTIAL)
+               base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#else
+       mss = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#endif
+#if TG3_VLAN_TAG_USED
+       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+               base_flags |= (TXD_FLAG_VLAN |
+                              (vlan_tx_tag_get(skb) << 16));
+#endif
+
+       /* Queue skb data, a.k.a. the main skb fragment. */
+       mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+       tp->tx_buffers[entry].skb = skb;
+       pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
+
+       tg3_set_txd(tp, entry, mapping, len, base_flags,
+                   (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
+
+       entry = NEXT_TX(entry);
+
+       /* Now loop through additional data fragments, and queue them. */
+       if (skb_shinfo(skb)->nr_frags > 0) {
+               unsigned int i, last;
+
+               last = skb_shinfo(skb)->nr_frags - 1;
+               for (i = 0; i <= last; i++) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       len = frag->size;
+                       mapping = pci_map_page(tp->pdev,
+                                              frag->page,
+                                              frag->page_offset,
+                                              len, PCI_DMA_TODEVICE);
+
+                       tp->tx_buffers[entry].skb = NULL;
+                       pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
-               writel(((u64) mapping & 0xffffffff),
-                      txd + TXD_ADDR + TG3_64BIT_REG_LOW);
-               writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
-               if (txr->prev_vlan_tag != vlan_tag) {
-                       writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
-                       txr->prev_vlan_tag = vlan_tag;
+                       tg3_set_txd(tp, entry, mapping, len,
+                                   base_flags, (i == last) | (mss << 1));
+
+                       entry = NEXT_TX(entry);
                }
        }
+
+       /* Packets are ready, update Tx producer idx local and on card. */
+       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+
+       tp->tx_prod = entry;
+       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+               netif_stop_queue(dev);
+               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+                       netif_wake_queue(tp->dev);
+       }
+
+out_unlock:
+       mmiowb();
+
+       dev->trans_start = jiffies;
+
+       return NETDEV_TX_OK;
 }
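The stop/wake decision at the end of the xmit path keeps the queue stopped until reclaim frees more than a quarter of the configured descriptors. tg3_tx_avail() is not part of this hunk; the helper below is an assumption that simply models "free descriptors remaining" so the thresholds can be shown.

#include <stdio.h>

#define TX_RING_SIZE	512
#define MAX_SKB_FRAGS	17	/* illustrative value only */

static unsigned int tx_avail(unsigned int prod, unsigned int cons,
			     unsigned int pending)
{
	return pending - ((prod - cons) & (TX_RING_SIZE - 1));
}

int main(void)
{
	unsigned int pending = 511;	/* tp->tx_pending */
	unsigned int prod = 505, cons = 0;
	unsigned int avail = tx_avail(prod, cons, pending);

	if (avail <= MAX_SKB_FRAGS + 1)
		printf("avail=%u: stop the queue\n", avail);

	cons = 400;	/* TX completion reclaimed a batch of descriptors */
	avail = tx_avail(prod, cons, pending);
	if (avail > pending / 4)	/* TG3_TX_WAKEUP_THRESH(tp) */
		printf("avail=%u: queue may be woken\n", avail);
	return 0;
}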
 
-static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+#if TG3_TSO_SUPPORT != 0
+static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
+
+/* Use GSO to work around a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 {
-       u32 base = (u32) mapping & 0xffffffff;
+       struct sk_buff *segs, *nskb;
 
-       return ((base > 0xffffdcc0) &&
-               (base + len + 8 < base));
+       /* Estimate the number of fragments in the worst case */
+       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+               netif_stop_queue(tp->dev);
+               return NETDEV_TX_BUSY;
+       }
+
+       segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+       if (unlikely(IS_ERR(segs)))
+               goto tg3_tso_bug_end;
+
+       do {
+               nskb = segs;
+               segs = segs->next;
+               nskb->next = NULL;
+               tg3_start_xmit_dma_bug(nskb, tp->dev);
+       } while (segs);
+
+tg3_tso_bug_end:
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
 }
+#endif
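The 80-byte limit above only matters for packets with unusually long IP/TCP headers; a quick userspace calculation of when the software-segmentation fallback would trigger (header sizes invented for the demo):

#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
	int ip_hdr_len = 20;	/* IPv4 header, no options */
	int tcp_hdr_len = 20;	/* base TCP header */
	int tcp_opt_len = 36;	/* long TCP option block */
	int hdr_len = ip_hdr_len + tcp_hdr_len + tcp_opt_len;

	if (ETH_HLEN + hdr_len > 80)
		printf("%d header bytes: segment in software (GSO)\n",
		       ETH_HLEN + hdr_len);
	else
		printf("%d header bytes: hardware TSO is fine\n",
		       ETH_HLEN + hdr_len);
	return 0;
}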
 
-static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
+ * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
+ */
+static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
-       unsigned int i;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;
-       unsigned long flags;
 
        len = skb_headlen(skb);
 
-       /* No BH disabling for tx_lock here.  We are running in BH disabled
-        * context and TX reclaim runs via tp->poll inside of a software
-        * interrupt.  Rejoice!
-        *
-        * Actually, things are not so simple.  If we are to take a hw
-        * IRQ here, we can deadlock, consider:
-        *
-        *       CPU1           CPU2
-        *   tg3_start_xmit
-        *   take tp->tx_lock
-        *                      tg3_timer
-        *                      take tp->lock
-        *   tg3_interrupt
-        *   spin on tp->lock
-        *                      spin on tp->tx_lock
-        *
-        * So we really do need to disable interrupts when taking
-        * tx_lock here.
+       /* We are running in BH disabled context with netif_tx_lock
+        * and TX reclaim runs via tp->poll inside of a software
+        * interrupt.  Furthermore, IRQ processing runs lockless so we have
+        * no IRQ context deadlocks to worry about either.  Rejoice!
         */
-       spin_lock_irqsave(&tp->tx_lock, flags);
+       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
 
-       /* This is a hard error, log it. */
-       if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
-               netif_stop_queue(dev);
-               spin_unlock_irqrestore(&tp->tx_lock, flags);
-               printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
-                      dev->name);
-               return 1;
+                       /* This is a hard error, log it. */
+                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+                              "queue awake!\n", dev->name);
+               }
+               return NETDEV_TX_BUSY;
        }
 
        entry = tp->tx_prod;
        base_flags = 0;
-       if (skb->ip_summed == CHECKSUM_HW)
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
 #if TG3_TSO_SUPPORT != 0
        mss = 0;
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
-           (mss = skb_shinfo(skb)->tso_size) != 0) {
-               int tcp_opt_len, ip_tcp_len;
+           (mss = skb_shinfo(skb)->gso_size) != 0) {
+               int tcp_opt_len, ip_tcp_len, hdr_len;
+
+               if (skb_header_cloned(skb) &&
+                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+                       dev_kfree_skb(skb);
+                       goto out_unlock;
+               }
 
                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
 
+               hdr_len = ip_tcp_len + tcp_opt_len;
+               if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+                            (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
+                       return (tg3_tso_bug(tp, skb));
+
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
                skb->nh.iph->check = 0;
-               skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
-               skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
-                                                     skb->nh.iph->daddr,
-                                                     0, IPPROTO_TCP, 0);
+               skb->nh.iph->tot_len = htons(mss + hdr_len);
+               if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+                       skb->h.th->check = 0;
+                       base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+               }
+               else {
+                       skb->h.th->check =
+                               ~csum_tcpudp_magic(skb->nh.iph->saddr,
+                                                  skb->nh.iph->daddr,
+                                                  0, IPPROTO_TCP, 0);
+               }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+               if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;
 
@@ -2961,7 +4109,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        would_hit_hwbug = 0;
 
        if (tg3_4g_overflow_test(mapping, len))
-               would_hit_hwbug = entry + 1;
+               would_hit_hwbug = 1;
 
        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -2985,14 +4133,13 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
-                       if (tg3_4g_overflow_test(mapping, len)) {
-                               /* Only one should match. */
-                               if (would_hit_hwbug)
-                                       BUG();
-                               would_hit_hwbug = entry + 1;
-                       }
+                       if (tg3_4g_overflow_test(mapping, len))
+                               would_hit_hwbug = 1;
+
+                       if (tg3_40bit_overflow_test(tp, mapping, len))
+                               would_hit_hwbug = 1;
 
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+                       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
@@ -3006,64 +4153,36 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;
-               unsigned int len = 0;
-
-               would_hit_hwbug -= 1;
-               entry = entry - 1 - skb_shinfo(skb)->nr_frags;
-               entry &= (TG3_TX_RING_SIZE - 1);
-               start = entry;
-               i = 0;
-               while (entry != last_plus_one) {
-                       if (i == 0)
-                               len = skb_headlen(skb);
-                       else
-                               len = skb_shinfo(skb)->frags[i-1].size;
 
-                       if (entry == would_hit_hwbug)
-                               break;
-
-                       i++;
-                       entry = NEXT_TX(entry);
-
-               }
+               start = entry - 1 - skb_shinfo(skb)->nr_frags;
+               start &= (TG3_TX_RING_SIZE - 1);
 
                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
-               if (tigon3_4gb_hwbug_workaround(tp, skb,
-                                               entry, len,
-                                               last_plus_one,
-                                               &start, mss))
+               if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
+                                               &start, base_flags, mss))
                        goto out_unlock;
 
                entry = start;
        }
 
        /* Packets are ready, update Tx producer idx local and on card. */
-       if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
-               tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
-                             TG3_64BIT_REG_LOW), entry);
-       } else {
-               /* First, make sure tg3 sees last descriptor fully
-                * in SRAM.
-                */
-               if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
-                       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
-
-               tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
-                             TG3_64BIT_REG_LOW), entry);
-       }
+       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
        tp->tx_prod = entry;
-       if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
+       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
+               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+                       netif_wake_queue(tp->dev);
+       }
 
 out_unlock:
-       spin_unlock_irqrestore(&tp->tx_lock, flags);
+       mmiowb();
 
        dev->trans_start = jiffies;
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
@@ -3071,15 +4190,24 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
 {
        dev->mtu = new_mtu;
 
-       if (new_mtu > ETH_DATA_LEN)
-               tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
-       else
-               tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
+       if (new_mtu > ETH_DATA_LEN) {
+               if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+                       tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
+                       ethtool_op_set_tso(dev, 0);
+               }
+               else
+                       tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+       } else {
+               if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+                       tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+               tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
+       }
 }
 
 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct tg3 *tp = netdev_priv(dev);
+       int err;
 
        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;
@@ -3093,20 +4221,21 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        }
 
        tg3_netif_stop(tp);
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
 
-       tg3_halt(tp);
+       tg3_full_lock(tp, 1);
+
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
        tg3_set_mtu(dev, tp, new_mtu);
 
-       tg3_init_hw(tp);
+       err = tg3_restart_hw(tp, 0);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
-       tg3_netif_start(tp);
+       if (!err)
+               tg3_netif_start(tp);
 
-       return 0;
+       tg3_full_unlock(tp);
+
+       return err;
 }
 
 /* Free up pending packets in all rx/tx rings.
@@ -3128,7 +4257,7 @@ static void tg3_free_rings(struct tg3 *tp)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
-                                RX_PKT_BUF_SZ - tp->rx_offset,
+                                tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
@@ -3188,9 +4317,8 @@ static void tg3_free_rings(struct tg3 *tp)
  * end up in the driver.  tp->{tx,}lock are held and thus
  * we may not sleep.
  */
-static void tg3_init_rings(struct tg3 *tp)
+static int tg3_init_rings(struct tg3 *tp)
 {
-       unsigned long start, end;
        u32 i;
 
        /* Free up all the SKBs. */
@@ -3200,21 +4328,12 @@ static void tg3_init_rings(struct tg3 *tp)
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+       memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
 
-       if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
-               memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
-       } else {
-               start = (tp->regs +
-                        NIC_SRAM_WIN_BASE +
-                        NIC_SRAM_TX_BUFFER_DESC);
-               end = start + TG3_TX_RING_BYTES;
-               while (start < end) {
-                       writel(0, start);
-                       start += 4;
-               }
-               for (i = 0; i < TG3_TX_RING_SIZE; i++)
-                       tp->tx_buffers[i].prev_vlan_tag = 0;
-       }
+       tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
+       if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
+           (tp->dev->mtu > ETH_DATA_LEN))
+               tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
 
        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
@@ -3224,14 +4343,14 @@ static void tg3_init_rings(struct tg3 *tp)
                struct tg3_rx_buffer_desc *rxd;
 
                rxd = &tp->rx_std[i];
-               rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
+               rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;
 
@@ -3247,18 +4366,38 @@ static void tg3_init_rings(struct tg3 *tp)
 
        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
-               if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
-                                    -1, i) < 0)
+               if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+                       printk(KERN_WARNING PFX
+                              "%s: Using a smaller RX standard ring, "
+                              "only %d out of %d buffers were allocated "
+                              "successfully.\n",
+                              tp->dev->name, i, tp->rx_pending);
+                       if (i == 0)
+                               return -ENOMEM;
+                       tp->rx_pending = i;
                        break;
+               }
        }
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
-                                            -1, i) < 0)
+                                            -1, i) < 0) {
+                               printk(KERN_WARNING PFX
+                                      "%s: Using a smaller RX jumbo ring, "
+                                      "only %d out of %d buffers were "
+                                      "allocated successfully.\n",
+                                      tp->dev->name, i, tp->rx_jumbo_pending);
+                               if (i == 0) {
+                                       tg3_free_rings(tp);
+                                       return -ENOMEM;
+                               }
+                               tp->rx_jumbo_pending = i;
                                break;
+                       }
                }
        }
+       return 0;
 }
 
 /*
@@ -3267,10 +4406,8 @@ static void tg3_init_rings(struct tg3 *tp)
  */
 static void tg3_free_consistent(struct tg3 *tp)
 {
-       if (tp->rx_std_buffers) {
-               kfree(tp->rx_std_buffers);
-               tp->rx_std_buffers = NULL;
-       }
+       kfree(tp->rx_std_buffers);
+       tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
@@ -3309,7 +4446,7 @@ static void tg3_free_consistent(struct tg3 *tp)
  */
 static int tg3_alloc_consistent(struct tg3 *tp)
 {
-       tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
+       tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
@@ -3318,13 +4455,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
        if (!tp->rx_std_buffers)
                return -ENOMEM;
 
-       memset(tp->rx_std_buffers, 0,
-              (sizeof(struct ring_info) *
-               (TG3_RX_RING_SIZE +
-                TG3_RX_JUMBO_RING_SIZE)) +
-              (sizeof(struct tx_ring_info) *
-               TG3_TX_RING_SIZE));
-
        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
@@ -3345,15 +4475,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
        if (!tp->rx_rcb)
                goto err_out;
 
-       if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
-               tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
-                                                  &tp->tx_desc_mapping);
-               if (!tp->tx_ring)
-                       goto err_out;
-       } else {
-               tp->tx_ring = NULL;
-               tp->tx_desc_mapping = 0;
-       }
+       tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
+                                          &tp->tx_desc_mapping);
+       if (!tp->tx_ring)
+               goto err_out;
 
        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
@@ -3382,13 +4507,12 @@ err_out:
 /* To stop a block, clear the enable bit and poll till it
  * clears.  tp->lock is held.
  */
-static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
 {
        unsigned int i;
        u32 val;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
@@ -3416,7 +4540,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
                        break;
        }
 
-       if (i == MAX_WAIT_CNT) {
+       if (i == MAX_WAIT_CNT && !silent) {
                printk(KERN_ERR PFX "tg3_stop_block timed out, "
                       "ofs=%lx enable_bit=%x\n",
                       ofs, enable_bit);
@@ -3427,7 +4551,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
 }
 
 /* tp->lock is held. */
-static int tg3_abort_hw(struct tg3 *tp)
+static int tg3_abort_hw(struct tg3 *tp, int silent)
 {
        int i, err;
 
@@ -3437,22 +4561,20 @@ static int tg3_abort_hw(struct tg3 *tp)
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);
 
-       err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
-       err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
-       err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
-
-       err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
-       err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
-       err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
-       err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
-       if (err)
-               goto out;
+       err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
+
+       err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
 
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
@@ -3470,27 +4592,24 @@ static int tg3_abort_hw(struct tg3 *tp)
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
-               return -ENODEV;
+               err |= -ENODEV;
        }
 
-       err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
-       err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
+       err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);
 
-       err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
-       err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
-       if (err)
-               goto out;
+       err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
-out:
        return err;
 }
 
@@ -3500,14 +4619,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
                int i;
 
-               tw32(NVRAM_SWARB, SWARB_REQ_SET1);
-               for (i = 0; i < 8000; i++) {
-                       if (tr32(NVRAM_SWARB) & SWARB_GNT1)
-                               break;
-                       udelay(20);
+               if (tp->nvram_lock_cnt == 0) {
+                       tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+                       for (i = 0; i < 8000; i++) {
+                               if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+                                       break;
+                               udelay(20);
+                       }
+                       if (i == 8000) {
+                               tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+                               return -ENODEV;
+                       }
                }
-               if (i == 8000)
-                       return -ENODEV;
+               tp->nvram_lock_cnt++;
        }
        return 0;
 }
@@ -3515,8 +4639,34 @@ static int tg3_nvram_lock(struct tg3 *tp)
 /* tp->lock is held. */
 static void tg3_nvram_unlock(struct tg3 *tp)
 {
-       if (tp->tg3_flags & TG3_FLAG_NVRAM)
-               tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+       if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+               if (tp->nvram_lock_cnt > 0)
+                       tp->nvram_lock_cnt--;
+               if (tp->nvram_lock_cnt == 0)
+                       tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+       }
+}
+
+/* tp->lock is held. */
+static void tg3_enable_nvram_access(struct tg3 *tp)
+{
+       if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
+           !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+               u32 nvaccess = tr32(NVRAM_ACCESS);
+
+               tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+       }
+}
+
+/* tp->lock is held. */
+static void tg3_disable_nvram_access(struct tg3 *tp)
+{
+       if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
+           !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+               u32 nvaccess = tr32(NVRAM_ACCESS);
+
+               tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+       }
 }
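The nvram_lock_cnt added above turns the hardware arbitration semaphore into a nesting lock: only the first lock requests the semaphore and only the last unlock releases it. A tiny userspace model of that counting:

#include <stdio.h>

static int lock_cnt;

static void nvram_lock(void)
{
	if (lock_cnt == 0)
		printf("request SWARB semaphore\n");
	lock_cnt++;
}

static void nvram_unlock(void)
{
	if (lock_cnt > 0)
		lock_cnt--;
	if (lock_cnt == 0)
		printf("release SWARB semaphore\n");
}

int main(void)
{
	nvram_lock();		/* outer caller */
	nvram_lock();		/* nested caller */
	nvram_unlock();		/* semaphore stays held */
	nvram_unlock();		/* now actually released */
	return 0;
}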
 
 /* tp->lock is held. */
@@ -3595,45 +4745,101 @@ static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
        }
 }
 
-/* tp->lock is held. */
-static int tg3_chip_reset(struct tg3 *tp)
+static int tg3_poll_fw(struct tg3 *tp)
 {
-       u32 val;
-       u32 flags_save;
        int i;
+       u32 val;
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704))
-               tg3_nvram_lock(tp);
-
-       /*
-        * We must avoid the readl() that normally takes place.
-        * It locks machines, causes machine checks, and other
-        * fun things.  So, temporarily disable the 5701
-        * hardware workaround, while we do the reset.
-        */
-       flags_save = tp->tg3_flags;
-       tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
-
-       /* do the reset */
-       val = GRC_MISC_CFG_CORECLK_RESET;
-
-       if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-               if (tr32(0x7e2c) == 0x60) {
-                       tw32(0x7e2c, 0x20);
-               }
-               if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
-                       tw32(GRC_MISC_CFG, (1 << 29));
-                       val |= (1 << 29);
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               /* Wait up to 20ms for init done. */
+               for (i = 0; i < 200; i++) {
+                       if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+                               return 0;
+                       udelay(100);
                }
+               return -ENODEV;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
-               val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+       /* Wait for firmware initialization to complete. */
+       for (i = 0; i < 100000; i++) {
+               tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+               if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+                       break;
+               udelay(10);
+       }
+
+       /* Chip might not be fitted with firmware.  Some Sun onboard
+        * parts are configured like that.  So don't signal the timeout
+        * of the above loop as an error, but do report the lack of
+        * running firmware once.
+        */
+       if (i >= 100000 &&
+           !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
+               tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+
+               printk(KERN_INFO PFX "%s: No firmware running.\n",
+                      tp->dev->name);
+       }
+
+       return 0;
+}
+
+static void tg3_stop_fw(struct tg3 *);
+
+/* tp->lock is held. */
+static int tg3_chip_reset(struct tg3 *tp)
+{
+       u32 val;
+       void (*write_op)(struct tg3 *, u32, u32);
+       int err;
+
+       tg3_nvram_lock(tp);
+
+       /* No matching tg3_nvram_unlock() after this because
+        * chip reset below will undo the nvram lock.
+        */
+       tp->nvram_lock_cnt = 0;
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+               tw32(GRC_FASTBOOT_PC, 0);
+
+       /*
+        * We must avoid the readl() that normally takes place.
+        * It locks machines, causes machine checks, and other
+        * fun things.  So, temporarily disable the 5701
+        * hardware workaround, while we do the reset.
+        */
+       write_op = tp->write32;
+       if (write_op == tg3_write_flush_reg32)
+               tp->write32 = tg3_write32;
+
+       /* do the reset */
+       val = GRC_MISC_CFG_CORECLK_RESET;
+
+       if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+               if (tr32(0x7e2c) == 0x60) {
+                       tw32(0x7e2c, 0x20);
+               }
+               if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+                       tw32(GRC_MISC_CFG, (1 << 29));
+                       val |= (1 << 29);
+               }
+       }
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
+               tw32(GRC_VCPU_EXT_CTRL,
+                    tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
+       }
+
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+               val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);
 
-       /* restore 5701 hardware bug workaround flag */
-       tp->tg3_flags = flags_save;
+       /* restore 5701 hardware bug workaround write method */
+       tp->write32 = write_op;
 
        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
@@ -3688,14 +4894,42 @@ static int tg3_chip_reset(struct tg3 *tp)
                val |= PCISTATE_RETRY_SAME_DMA;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
 
-       pci_restore_state(tp->pdev, tp->pci_cfg_state);
+       pci_restore_state(tp->pdev);
 
        /* Make sure PCI-X relaxed ordering bit is clear. */
        pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
        val &= ~PCIX_CAPS_RELAXED_ORDERING;
        pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
 
-       tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+               u32 val;
+
+               /* Chip reset on 5780 will reset MSI enable bit,
+                * so need to restore it.
+                */
+               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+                       u16 ctrl;
+
+                       pci_read_config_word(tp->pdev,
+                                            tp->msi_cap + PCI_MSI_FLAGS,
+                                            &ctrl);
+                       pci_write_config_word(tp->pdev,
+                                             tp->msi_cap + PCI_MSI_FLAGS,
+                                             ctrl | PCI_MSI_FLAGS_ENABLE);
+                       val = tr32(MSGINT_MODE);
+                       tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+               }
+
+               val = tr32(MEMARB_MODE);
+               tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+       } else
+               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+
+       if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+               tg3_stop_fw(tp);
+               tw32(0x5000, 0x400);
+       }
 
        tw32(GRC_MODE, tp->grc_mode);
 
@@ -3713,27 +4947,19 @@ static int tg3_chip_reset(struct tg3 *tp)
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
 
-       if (tp->phy_id == PHY_ID_SERDES) {
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
+       } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+               tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
+               tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);
 
-       /* Wait for firmware initialization to complete. */
-       for (i = 0; i < 100000; i++) {
-               tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
-               if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
-                       break;
-               udelay(10);
-       }
-       if (i >= 100000 &&
-           !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
-               printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
-                      "firmware will not restart magic=%08x\n",
-                      tp->dev->name, val);
-               return -ENODEV;
-       }
+       err = tg3_poll_fw(tp);
+       if (err)
+               return err;
 
        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
@@ -3752,7 +4978,7 @@ static int tg3_chip_reset(struct tg3 *tp)
                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+                       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }
@@ -3782,19 +5008,19 @@ static void tg3_stop_fw(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
        int err;
 
        tg3_stop_fw(tp);
 
-       tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+       tg3_write_sig_pre_reset(tp, kind);
 
-       tg3_abort_hw(tp);
+       tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);
 
-       tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
-       tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+       tg3_write_sig_legacy(tp, kind);
+       tg3_write_sig_post_reset(tp, kind);
 
        if (err)
                return err;
@@ -3817,7 +5043,7 @@ static int tg3_halt(struct tg3 *tp)
 #define TG3_FW_BSS_ADDR                0x08000a70
 #define TG3_FW_BSS_LEN         0x10
 
-static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
+static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
@@ -3911,7 +5137,7 @@ static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
 };
 
-static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
+static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
@@ -3936,10 +5162,15 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 {
        int i;
 
-       if (offset == TX_CPU_BASE &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
-               BUG();
+       BUG_ON(offset == TX_CPU_BASE &&
+           (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
+
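+       /* The 5906 has a virtual CPU rather than the usual RX/TX RISCs;
+        * halting it goes through GRC_VCPU_EXT_CTRL instead of the
+        * CPU_STATE/CPU_MODE registers below.
+        */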
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               u32 val = tr32(GRC_VCPU_EXT_CTRL);
 
+               tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+               return 0;
+       }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
@@ -3967,48 +5198,52 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }
+
+       /* Clear firmware's nvram arbitration. */
+       if (tp->tg3_flags & TG3_FLAG_NVRAM)
+               tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
 }
 
 struct fw_info {
        unsigned int text_base;
        unsigned int text_len;
-       u32 *text_data;
+       const u32 *text_data;
        unsigned int rodata_base;
        unsigned int rodata_len;
-       u32 *rodata_data;
+       const u32 *rodata_data;
        unsigned int data_base;
        unsigned int data_len;
-       u32 *data_data;
+       const u32 *data_data;
 };
 
 /* tp->lock is held. */
 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
                                 int cpu_scratch_size, struct fw_info *info)
 {
-       int err, i;
-       u32 orig_tg3_flags = tp->tg3_flags;
+       int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);
 
        if (cpu_base == TX_CPU_BASE &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+           (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
                       "TX cpu firmware on %s which is 5705.\n",
                       tp->dev->name);
                return -EINVAL;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;
 
-       /* Force use of PCI config space for indirect register
-        * write calls.
+       /* It is possible that bootcode is still loading at this point.
+        * Get the nvram lock before halting the cpu.
         */
-       tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
-
+       lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
+       if (!lock_err)
+               tg3_nvram_unlock(tp);
        if (err)
                goto out;
 
@@ -4038,7 +5273,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
        err = 0;
 
 out:
-       tp->tg3_flags = orig_tg3_flags;
        return err;
 }
 
@@ -4112,7 +5346,7 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 #define TG3_TSO_FW_BSS_ADDR            0x08001b80
 #define TG3_TSO_FW_BSS_LEN             0x894
 
-static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
+static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
        0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
        0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
@@ -4399,7 +5633,7 @@ static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
        0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
 };
 
-u32 tg3TsoFwRodata[] = {
+static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
@@ -4407,7 +5641,7 @@ u32 tg3TsoFwRodata[] = {
        0x00000000,
 };
 
-u32 tg3TsoFwData[] = {
+static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
@@ -4429,7 +5663,7 @@ u32 tg3TsoFwData[] = {
 #define TG3_TSO5_FW_BSS_ADDR           0x00010f50
 #define TG3_TSO5_FW_BSS_LEN            0x88
 
-static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
+static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
@@ -4588,14 +5822,14 @@ static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x00000000, 0x00000000, 0x00000000,
 };
 
-u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
+static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
 };
 
-u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
+static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
 };
@@ -4607,7 +5841,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -4691,9 +5925,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp)
                tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                for (i = 0; i < 12; i++) {
                        tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
                        tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
@@ -4714,14 +5947,35 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 {
        struct tg3 *tp = netdev_priv(dev);
        struct sockaddr *addr = p;
+       int err = 0;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
 
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-       spin_lock_irq(&tp->lock);
-       __tg3_set_mac_addr(tp);
-       spin_unlock_irq(&tp->lock);
+       if (!netif_running(dev))
+               return 0;
 
-       return 0;
+       if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+               /* Reset chip so that ASF can re-init any MAC addresses it
+                * needs.
+                */
+               tg3_netif_stop(tp);
+               tg3_full_lock(tp, 1);
+
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               err = tg3_restart_hw(tp, 0);
+               if (!err)
+                       tg3_netif_start(tp);
+               tg3_full_unlock(tp);
+       } else {
+               spin_lock_bh(&tp->lock);
+               __tg3_set_mac_addr(tp);
+               spin_unlock_bh(&tp->lock);
+       }
+
+       return err;
 }
 
 /* tp->lock is held. */
@@ -4739,16 +5993,37 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
                      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
                       maxlen_flags);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                tg3_write_mem(tp,
                              (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
                              nic_addr);
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
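+/* Program the host coalescing engine from an ethtool_coalesce structure.
+ * The per-interrupt tick registers and the statistics tick register are
+ * only written on pre-5705 chips, and the statistics ticks are forced to
+ * zero while the link is down.
+ */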
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+       tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+       tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+       tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+       tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+               tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+               tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+       }
+       tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+       tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+               u32 val = ec->stats_block_coalesce_usecs;
+
+               if (!netif_carrier_ok(tp->dev))
+                       val = 0;
+
+               tw32(HOSTCC_STAT_COAL_TICKS, val);
+       }
+}
 
 /* tp->lock is held. */
-static int tg3_reset_hw(struct tg3 *tp)
+static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
        u32 val, rdmac_mode;
        int i, err, limit;
@@ -4760,11 +6035,12 @@ static int tg3_reset_hw(struct tg3 *tp)
        tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 
        if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
-               err = tg3_abort_hw(tp);
-               if (err)
-                       return err;
+               tg3_abort_hw(tp, 1);
        }
 
+       if (reset_phy)
+               tg3_phy_reset(tp);
+
        err = tg3_chip_reset(tp);
        if (err)
                return err;
@@ -4799,7 +6075,9 @@ static int tg3_reset_hw(struct tg3 *tp)
         * can only do this after the hardware has been
         * successfully reset.
         */
-       tg3_init_rings(tp);
+       err = tg3_init_rings(tp);
+       if (err)
+               return err;
 
        /* This value is determined during the probe time DMA
         * engine test, tg3_test_dma.
@@ -4810,14 +6088,15 @@ static int tg3_reset_hw(struct tg3 *tp)
                          GRC_MODE_4X_NIC_SEND_RINGS |
                          GRC_MODE_NO_TX_PHDR_CSUM |
                          GRC_MODE_NO_RX_PHDR_CSUM);
-       if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
-               tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
-       else
-               tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
-       if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
-               tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
-       if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
-               tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
+       tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+       /* Pseudo-header checksum is done by hardware logic and not
+        * the offload processors, so make the chip do the pseudo-
+        * header checksums on receive.  For transmit it is more
+        * convenient to do the pseudo-header checksum in software
+        * as Linux does that on transmit for us in all cases.
+        */
+       tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
 
        tw32(GRC_MODE,
             tp->grc_mode |
@@ -4830,7 +6109,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32(GRC_MISC_CFG, val);
 
        /* Initialize MBUF/DESC pool. */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
                /* Do nothing.  */
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
                tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
@@ -4858,7 +6137,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        }
 #endif
 
-       if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
+       if (tp->dev->mtu <= ETH_DATA_LEN) {
                tw32(BUFMGR_MB_RDMA_LOW_WATER,
                     tp->bufmgr_config.mbuf_read_dma_low_water);
                tw32(BUFMGR_MB_MACRX_LOW_WATER,
@@ -4891,7 +6170,20 @@ static int tg3_reset_hw(struct tg3 *tp)
        }
 
        /* Setup replenish threshold. */
-       tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
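+       /* Replenish after 1/8 of the posted standard ring is consumed,
+        * clamped to at least one BD and to the chip's rx_std_max_post
+        * limit; the 5906 is capped further by its small internal ring.
+        */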
+       val = tp->rx_pending / 8;
+       if (val == 0)
+               val = 1;
+       else if (val > tp->rx_std_max_post)
+               val = tp->rx_std_max_post;
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+                       tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
+
+               if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
+                       val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
+       }
+
+       tw32(RCVBDI_STD_THRESH, val);
 
        /* Initialize TG3_BDINFO's at:
         *  RCVDBDI_STD_BD:     standard eth size rx ring
@@ -4920,8 +6212,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        /* Don't even try to program the JUMBO/MINI buffer descriptor
         * configs on 5705.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
                     RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
        } else {
@@ -4934,7 +6225,7 @@ static int tg3_reset_hw(struct tg3 *tp)
                /* Setup replenish threshold. */
                tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
 
-               if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
                             ((u64) tp->rx_jumbo_mapping >> 32));
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -4953,8 +6244,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        /* There is only one send ring on 5705/5750, no need to explicitly
         * disable the others.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                /* Clear out send RCB ring in SRAM. */
                for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
                        tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
@@ -4966,24 +6256,16 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
        tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
 
-       if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
-               tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
-                              tp->tx_desc_mapping,
-                              (TG3_TX_RING_SIZE <<
-                               BDINFO_FLAGS_MAXLEN_SHIFT),
-                              NIC_SRAM_TX_BUFFER_DESC);
-       } else {
-               tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
-                              0,
-                              BDINFO_FLAGS_DISABLED,
-                              NIC_SRAM_TX_BUFFER_DESC);
-       }
+       tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
+                      tp->tx_desc_mapping,
+                      (TG3_TX_RING_SIZE <<
+                       BDINFO_FLAGS_MAXLEN_SHIFT),
+                      NIC_SRAM_TX_BUFFER_DESC);
 
        /* There is only one receive return ring on 5705/5750, no need
         * to explicitly disable the others.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
                     i += TG3_BDINFO_SIZE) {
                        tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
@@ -5004,7 +6286,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                     tp->rx_std_ptr);
 
-       tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
+       tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
                                                tp->rx_jumbo_pending : 0;
        tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                     tp->rx_jumbo_ptr);
@@ -5037,6 +6319,8 @@ static int tg3_reset_hw(struct tg3 *tp)
                      RDMAC_MODE_LNGREAD_ENAB);
        if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
                rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
+
+       /* If statement applies to 5705 and 5750 PCI devices only */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
@@ -5050,14 +6334,21 @@ static int tg3_reset_hw(struct tg3 *tp)
                }
        }
 
+       if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
+               rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+
 #if TG3_TSO_SUPPORT != 0
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                rdmac_mode |= (1 << 27);
 #endif
 
        /* Receive/send statistics. */
-       if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
-           (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+               val = tr32(RCVLPC_STATS_ENABLE);
+               val &= ~RCVLPC_STATSENAB_DACK_FIX;
+               tw32(RCVLPC_STATS_ENABLE, val);
+       } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+                  (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
                val = tr32(RCVLPC_STATS_ENABLE);
                val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
                tw32(RCVLPC_STATS_ENABLE, val);
@@ -5078,17 +6369,7 @@ static int tg3_reset_hw(struct tg3 *tp)
                udelay(10);
        }
 
-       tw32(HOSTCC_RXCOL_TICKS, 0);
-       tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-       tw32(HOSTCC_RXMAX_FRAMES, 1);
-       tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
-               tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-               tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-       }
-       tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-       tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+       __tg3_set_coalesce(tp, &tp->coal);
 
        /* set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5096,14 +6377,11 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tp->status_mapping & 0xffffffff));
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                /* Status/statistics block address.  See tg3_timer,
                 * the tg3_periodic_fetch_stats call there, and
                 * tg3_get_stats to see how this works for 5705/5750 chips.
                 */
-               tw32(HOSTCC_STAT_COAL_TICKS,
-                    DEFAULT_STAT_COAL_TICKS);
                tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
                     ((u64) tp->stats_mapping >> 32));
                tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5116,8 +6394,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 
        tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
        tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
 
        /* Clear statistics/status block in chip, and status block in ram. */
@@ -5129,23 +6406,52 @@ static int tg3_reset_hw(struct tg3 *tp)
        }
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
 
+       if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               /* reset to prevent losing 1st rx packet intermittently */
+               tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+               udelay(10);
+       }
+
        tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
                MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
        udelay(40);
 
-       tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
-               tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
-                                      GRC_LCLCTRL_GPIO_OUTPUT1);
+       /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
+        * If TG3_FLG2_IS_NIC is zero, we should read the
+        * register to preserve the GPIO settings for LOMs. The GPIOs,
+        * whether used as inputs or outputs, are set by boot code after
+        * reset.
+        */
+       if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
+               u32 gpio_mask;
+
+               gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+                           GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+                           GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
+
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+                       gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
+                                    GRC_LCLCTRL_GPIO_OUTPUT3;
+
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+                       gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+               tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
+
+               /* GPIO1 must be driven high for eeprom write protect */
+               if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
+                       tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+                                              GRC_LCLCTRL_GPIO_OUTPUT1);
+       }
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
 
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-       tr32(MAILBOX_INTERRUPT_0);
+       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
+       tp->last_tag = 0;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
                udelay(40);
        }
@@ -5156,6 +6462,7 @@ static int tg3_reset_hw(struct tg3 *tp)
               WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
               WDMAC_MODE_LNGREAD_ENAB);
 
+       /* If statement applies to 5705 and 5750 PCI devices only */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
@@ -5170,6 +6477,11 @@ static int tg3_reset_hw(struct tg3 *tp)
                }
        }
 
+       /* Enable host coalescing bug fix */
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
+               val |= (1 << 29);
+
        tw32_f(WDMAC_MODE, val);
        udelay(40);
 
@@ -5192,8 +6504,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        udelay(40);
 
        tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
        tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
        tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
@@ -5201,7 +6512,7 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
        tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
 #if TG3_TSO_SUPPORT != 0
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
 #endif
        tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
@@ -5226,6 +6537,9 @@ static int tg3_reset_hw(struct tg3 *tp)
        udelay(100);
 
        tp->rx_mode = RX_MODE_ENABLE;
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+               tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);
 
@@ -5243,16 +6557,18 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32(MAC_LED_CTRL, tp->led_ctrl);
 
        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
-       if (tp->phy_id == PHY_ID_SERDES) {
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
        }
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);
 
-       if (tp->phy_id == PHY_ID_SERDES) {
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
+                       !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
                        /* Set drive transmission level to 1.2V  */
+                       /* only if the signal pre-emphasis bit is not set  */
                        val = tr32(MAC_SERDES_CFG);
                        val &= 0xfffff000;
                        val |= 0x880;
@@ -5268,36 +6584,35 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-           tp->phy_id == PHY_ID_SERDES) {
-               /* Enable hardware link auto-negotiation */
-               u32 digctrl, txctrl;
-
-               digctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_CRC16_CLEAR_N |
-                   SG_DIG_LOCAL_DUPLEX_STATUS | SG_DIG_LOCAL_LINK_STATUS |
-                   (2 << SG_DIG_SPEED_STATUS_SHIFT) | SG_DIG_FIBER_MODE |
-                   SG_DIG_GBIC_ENABLE;
-
-               txctrl = tr32(MAC_SERDES_CFG);
-               tw32_f(MAC_SERDES_CFG, txctrl | MAC_SERDES_CFG_EDGE_SELECT);
-               tw32_f(SG_DIG_CTRL, digctrl | SG_DIG_SOFT_RESET);
-               tr32(SG_DIG_CTRL);
-               udelay(5);
-               tw32_f(SG_DIG_CTRL, digctrl);
-
+           (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+               /* Use hardware link auto-negotiation */
                tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
        }
 
-       err = tg3_setup_phy(tp, 1);
+       if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
+               u32 tmp;
+
+               tmp = tr32(SERDES_RX_CTRL);
+               tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
+               tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
+               tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
+               tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+       }
+
+       err = tg3_setup_phy(tp, 0);
        if (err)
                return err;
 
-       if (tp->phy_id != PHY_ID_SERDES) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
                u32 tmp;
 
                /* Clear CRC stats. */
-               tg3_readphy(tp, 0x1e, &tmp);
-               tg3_writephy(tp, 0x1e, tmp | 0x8000);
-               tg3_readphy(tp, 0x14, &tmp);
+               if (!tg3_readphy(tp, 0x1e, &tmp)) {
+                       tg3_writephy(tp, 0x1e, tmp | 0x8000);
+                       tg3_readphy(tp, 0x14, &tmp);
+               }
        }
 
        __tg3_set_rx_mode(tp->dev);
@@ -5308,8 +6623,8 @@ static int tg3_reset_hw(struct tg3 *tp)
        tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
        tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+       if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                limit = 8;
        else
                limit = 16;
@@ -5353,21 +6668,18 @@ static int tg3_reset_hw(struct tg3 *tp)
 
        tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 
-       if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
-               tg3_enable_ints(tp);
-
        return 0;
 }
 
 /* Called at device open time to get the chip ready for
  * packet processing.  Invoked with tp->lock held.
  */
-static int tg3_init_hw(struct tg3 *tp)
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
 {
        int err;
 
        /* Force the chip into D0. */
-       err = tg3_set_power_state(tp, 0);
+       err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                goto out;
 
@@ -5375,7 +6687,7 @@ static int tg3_init_hw(struct tg3 *tp)
 
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 
-       err = tg3_reset_hw(tp);
+       err = tg3_reset_hw(tp, reset_phy);
 
 out:
        return err;
@@ -5423,42 +6735,47 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+       TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
+       TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+       TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
 }
 
 static void tg3_timer(unsigned long __opaque)
 {
        struct tg3 *tp = (struct tg3 *) __opaque;
-       unsigned long flags;
 
-       spin_lock_irqsave(&tp->lock, flags);
-       spin_lock(&tp->tx_lock);
+       if (tp->irq_sync)
+               goto restart_timer;
 
-       /* All of this garbage is because when using non-tagged
-        * IRQ status the mailbox/status_block protocol the chip
-        * uses with the cpu is race prone.
-        */
-       if (tp->hw_status->status & SD_STATUS_UPDATED) {
-               tw32(GRC_LOCAL_CTRL,
-                    tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-       } else {
-               tw32(HOSTCC_MODE, tp->coalesce_mode |
-                    (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-       }
+       spin_lock(&tp->lock);
 
-       if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-               tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-               spin_unlock(&tp->tx_lock);
-               spin_unlock_irqrestore(&tp->lock, flags);
-               schedule_work(&tp->reset_task);
-               return;
-       }
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+               /* All of this garbage is because, when using non-tagged
+                * IRQ status, the mailbox/status_block protocol the chip
+                * uses with the cpu is race prone.
+                */
+               if (tp->hw_status->status & SD_STATUS_UPDATED) {
+                       tw32(GRC_LOCAL_CTRL,
+                            tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+               } else {
+                       tw32(HOSTCC_MODE, tp->coalesce_mode |
+                            (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+               }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
-               tg3_periodic_fetch_stats(tp);
+               if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+                       tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+                       spin_unlock(&tp->lock);
+                       schedule_work(&tp->reset_task);
+                       return;
+               }
+       }
 
        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
+               if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+                       tg3_periodic_fetch_stats(tp);
+
                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;
@@ -5483,31 +6800,53 @@ static void tg3_timer(unsigned long __opaque)
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
-                           (mac_stat & MAC_STATUS_PCS_SYNCED)) {
+                           (mac_stat & (MAC_STATUS_PCS_SYNCED |
+                                        MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
-                               tw32_f(MAC_MODE,
-                                    (tp->mac_mode &
-                                     ~MAC_MODE_PORT_MODE_MASK));
-                               udelay(40);
-                               tw32_f(MAC_MODE, tp->mac_mode);
-                               udelay(40);
+                               if (!tp->serdes_counter) {
+                                       tw32_f(MAC_MODE,
+                                            (tp->mac_mode &
+                                             ~MAC_MODE_PORT_MODE_MASK));
+                                       udelay(40);
+                                       tw32_f(MAC_MODE, tp->mac_mode);
+                                       udelay(40);
+                               }
                                tg3_setup_phy(tp, 0);
                        }
-               }
+               } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+                       tg3_serdes_parallel_detect(tp);
 
                tp->timer_counter = tp->timer_multiplier;
        }
 
-       /* Heartbeat is only sent once every 120 seconds.  */
+       /* Heartbeat is only sent once every 2 seconds.
+        *
+        * The heartbeat is to tell the ASF firmware that the host
+        * driver is still alive.  In the event that the OS crashes,
+        * ASF needs to reset the hardware to free up the FIFO space
+        * that may be filled with rx packets destined for the host.
+        * If the FIFO is full, ASF will no longer function properly.
+        *
+        * Unintended resets have been reported on real-time kernels
+        * where the timer doesn't run on time.  Netpoll will also have
+        * the same problem.
+        *
+        * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
+        * to check the ring condition when the heartbeat is expiring
+        * before doing the reset.  This will prevent most unintended
+        * resets.
+        */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;
 
-                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
+                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
+                                     FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
-                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
+                       /* 5 second timeout */
+                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
@@ -5515,100 +6854,298 @@ static void tg3_timer(unsigned long __opaque)
                tp->asf_counter = tp->asf_multiplier;
        }
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irqrestore(&tp->lock, flags);
+       spin_unlock(&tp->lock);
 
+restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
 }
 
-static int tg3_open(struct net_device *dev)
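+/* Pick the ISR flavor that matches how interrupts are delivered:
+ * one-shot MSI, plain MSI, tagged-status INTx or legacy INTx.
+ */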
+static int tg3_request_irq(struct tg3 *tp)
 {
-       struct tg3 *tp = netdev_priv(dev);
-       int err;
+       irq_handler_t fn;
+       unsigned long flags;
+       struct net_device *dev = tp->dev;
+
+       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+               fn = tg3_msi;
+               if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+                       fn = tg3_msi_1shot;
+               flags = IRQF_SAMPLE_RANDOM;
+       } else {
+               fn = tg3_interrupt;
+               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                       fn = tg3_interrupt_tagged;
+               flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
+       }
+       return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
+}
+
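+/* Check that the device can actually deliver an interrupt: install a
+ * minimal test ISR, force an interrupt with HOSTCC_MODE_NOW, poll the
+ * interrupt mailbox and MISC_HOST_CTRL for evidence of delivery, then
+ * restore the normal handler.
+ */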
+static int tg3_test_interrupt(struct tg3 *tp)
+{
+       struct net_device *dev = tp->dev;
+       int err, i, intr_ok = 0;
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       if (!netif_running(dev))
+               return -ENODEV;
 
        tg3_disable_ints(tp);
-       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       free_irq(tp->pdev->irq, dev);
 
-       /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
-        * tp->tg3_flags is accurate at that new place.
-        */
-       err = tg3_alloc_consistent(tp);
+       err = request_irq(tp->pdev->irq, tg3_test_isr,
+                         IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;
 
-       err = request_irq(dev->irq, tg3_interrupt,
-                         SA_SHIRQ, dev->name, dev);
+       tp->hw_status->status &= ~SD_STATUS_UPDATED;
+       tg3_enable_ints(tp);
 
-       if (err) {
-               tg3_free_consistent(tp);
-               return err;
-       }
+       tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+              HOSTCC_MODE_NOW);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       for (i = 0; i < 5; i++) {
+               u32 int_mbox, misc_host_ctrl;
 
-       err = tg3_init_hw(tp);
-       if (err) {
-               tg3_halt(tp);
-               tg3_free_rings(tp);
-       } else {
-               tp->timer_offset = HZ / 10;
-               tp->timer_counter = tp->timer_multiplier = 10;
-               tp->asf_counter = tp->asf_multiplier = (10 * 120);
+               int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
+                                       TG3_64BIT_REG_LOW);
+               misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 
-               init_timer(&tp->timer);
-               tp->timer.expires = jiffies + tp->timer_offset;
-               tp->timer.data = (unsigned long) tp;
-               tp->timer.function = tg3_timer;
-               add_timer(&tp->timer);
+               if ((int_mbox != 0) ||
+                   (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
+                       intr_ok = 1;
+                       break;
+               }
 
-               tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+               msleep(10);
        }
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
-
-       if (err) {
-               free_irq(dev->irq, dev);
-               tg3_free_consistent(tp);
-               return err;
-       }
+       tg3_disable_ints(tp);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       free_irq(tp->pdev->irq, dev);
 
-       tg3_enable_ints(tp);
+       err = tg3_request_irq(tp);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       if (err)
+               return err;
 
-       netif_start_queue(dev);
+       if (intr_ok)
+               return 0;
 
-       return 0;
+       return -EIO;
 }
 
-#if 0
-/*static*/ void tg3_dump_state(struct tg3 *tp)
+/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
+ * successfully restored
+ */
+static int tg3_test_msi(struct tg3 *tp)
 {
-       u32 val32, val32_2, val32_3, val32_4, val32_5;
-       u16 val16;
-       int i;
+       struct net_device *dev = tp->dev;
+       int err;
+       u16 pci_cmd;
 
-       pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
-       pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
-       printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
-              val16, val32);
+       if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
+               return 0;
 
-       /* MAC block */
-       printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
-              tr32(MAC_MODE), tr32(MAC_STATUS));
+       /* Turn off SERR reporting in case MSI terminates with Master
+        * Abort.
+        */
+       pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+       pci_write_config_word(tp->pdev, PCI_COMMAND,
+                             pci_cmd & ~PCI_COMMAND_SERR);
+
+       err = tg3_test_interrupt(tp);
+
+       pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+       if (!err)
+               return 0;
+
+       /* other failures */
+       if (err != -EIO)
+               return err;
+
+       /* MSI test failed, go back to INTx mode */
+       printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
+              "switching to INTx mode. Please report this failure to "
+              "the PCI maintainer and include system chipset information.\n",
+                      tp->dev->name);
+
+       free_irq(tp->pdev->irq, dev);
+       pci_disable_msi(tp->pdev);
+
+       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+
+       err = tg3_request_irq(tp);
+       if (err)
+               return err;
+
+       /* Need to reset the chip because the MSI cycle may have terminated
+        * with Master Abort.
+        */
+       tg3_full_lock(tp, 1);
+
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+       err = tg3_init_hw(tp, 1);
+
+       tg3_full_unlock(tp);
+
+       if (err)
+               free_irq(tp->pdev->irq, dev);
+
+       return err;
+}
+
+static int tg3_open(struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int err;
+
+       netif_carrier_off(tp->dev);
+
+       tg3_full_lock(tp, 0);
+
+       err = tg3_set_power_state(tp, PCI_D0);
+       if (err) {
+               tg3_full_unlock(tp);
+               return err;
+       }
+
+       tg3_disable_ints(tp);
+       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+
+       tg3_full_unlock(tp);
+
+       /* The placement of this call is tied
+        * to the setup and use of Host TX descriptors.
+        */
+       err = tg3_alloc_consistent(tp);
+       if (err)
+               return err;
+
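+       /* MSI is only attempted on 5750-and-newer chips, skipping the
+        * 5750 AX/BX revisions and 5714 parts whose pdev_peer is the
+        * device itself, and only when tagged status is supported.
+        */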
+       if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
+           (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
+           (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
+           !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
+             (tp->pdev_peer == tp->pdev))) {
+               /* All MSI supporting chips should support tagged
+                * status.  Assert that this is the case.
+                */
+               if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                       printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+                              "Not using MSI.\n", tp->dev->name);
+               } else if (pci_enable_msi(tp->pdev) == 0) {
+                       u32 msi_mode;
+
+                       msi_mode = tr32(MSGINT_MODE);
+                       tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+                       tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
+               }
+       }
+       err = tg3_request_irq(tp);
+
+       if (err) {
+               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+                       pci_disable_msi(tp->pdev);
+                       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+               }
+               tg3_free_consistent(tp);
+               return err;
+       }
+
+       tg3_full_lock(tp, 0);
+
+       err = tg3_init_hw(tp, 1);
+       if (err) {
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               tg3_free_rings(tp);
+       } else {
+               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                       tp->timer_offset = HZ;
+               else
+                       tp->timer_offset = HZ / 10;
+
+               BUG_ON(tp->timer_offset > HZ);
+               tp->timer_counter = tp->timer_multiplier =
+                       (HZ / tp->timer_offset);
+               tp->asf_counter = tp->asf_multiplier =
+                       ((HZ / tp->timer_offset) * 2);
+
+               init_timer(&tp->timer);
+               tp->timer.expires = jiffies + tp->timer_offset;
+               tp->timer.data = (unsigned long) tp;
+               tp->timer.function = tg3_timer;
+       }
+
+       tg3_full_unlock(tp);
+
+       if (err) {
+               free_irq(tp->pdev->irq, dev);
+               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+                       pci_disable_msi(tp->pdev);
+                       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+               }
+               tg3_free_consistent(tp);
+               return err;
+       }
+
+       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+               err = tg3_test_msi(tp);
+
+               if (err) {
+                       tg3_full_lock(tp, 0);
+
+                       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+                               pci_disable_msi(tp->pdev);
+                               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+                       }
+                       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+                       tg3_free_rings(tp);
+                       tg3_free_consistent(tp);
+
+                       tg3_full_unlock(tp);
+
+                       return err;
+               }
+
+               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+                       if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
+                               u32 val = tr32(PCIE_TRANSACTION_CFG);
+
+                               tw32(PCIE_TRANSACTION_CFG,
+                                    val | PCIE_TRANS_CFG_1SHOT_MSI);
+                       }
+               }
+       }
+
+       tg3_full_lock(tp, 0);
+
+       add_timer(&tp->timer);
+       tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+       tg3_enable_ints(tp);
+
+       tg3_full_unlock(tp);
+
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+#if 0
+/*static*/ void tg3_dump_state(struct tg3 *tp)
+{
+       u32 val32, val32_2, val32_3, val32_4, val32_5;
+       u16 val16;
+       int i;
+
+       pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
+       pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
+       printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
+              val16, val32);
+
+       /* MAC block */
+       printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
+              tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
@@ -5770,10 +7307,10 @@ static int tg3_open(struct net_device *dev)
 
        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
-              tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
-              tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
-              tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
-              tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
+              tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
+              tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
+              tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
+              tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
 
        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
@@ -5829,29 +7366,37 @@ static int tg3_close(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
+       /* Calling flush_scheduled_work() may deadlock because
+        * linkwatch_event() may be on the workqueue and it will try to get
+        * the rtnl_lock, which we are holding.
+        */
+       while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
+               msleep(1);
+
        netif_stop_queue(dev);
 
        del_timer_sync(&tp->timer);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
 #if 0
        tg3_dump_state(tp);
 #endif
 
        tg3_disable_ints(tp);
 
-       tg3_halt(tp);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &=
                ~(TG3_FLAG_INIT_COMPLETE |
                  TG3_FLAG_GOT_SERDES_FLOWCTL);
-       netif_carrier_off(tp->dev);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
-       free_irq(dev->irq, dev);
+       free_irq(tp->pdev->irq, dev);
+       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+               pci_disable_msi(tp->pdev);
+               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+       }
 
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
@@ -5860,6 +7405,10 @@ static int tg3_close(struct net_device *dev)
 
        tg3_free_consistent(tp);
 
+       tg3_set_power_state(tp, PCI_D3hot);
+
+       netif_carrier_off(tp->dev);
+
        return 0;
 }
 
@@ -5879,17 +7428,18 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
 {
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
-       if (tp->phy_id != PHY_ID_SERDES &&
+       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
-               unsigned long flags;
                u32 val;
 
-               spin_lock_irqsave(&tp->lock, flags);
-               tg3_readphy(tp, 0x1e, &val);
-               tg3_writephy(tp, 0x1e, val | 0x8000);
-               tg3_readphy(tp, 0x14, &val);
-               spin_unlock_irqrestore(&tp->lock, flags);
+               spin_lock_bh(&tp->lock);
+               if (!tg3_readphy(tp, 0x1e, &val)) {
+                       tg3_writephy(tp, 0x1e, val | 0x8000);
+                       tg3_readphy(tp, 0x14, &val);
+               } else
+                       val = 0;
+               spin_unlock_bh(&tp->lock);
 
                tp->phy_crc_errors += val;
 
@@ -6005,7 +7555,7 @@ static struct net_device_stats *tg3_get_stats(struct net_device *dev)
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);
-               
+
        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
@@ -6017,8 +7567,7 @@ static struct net_device_stats *tg3_get_stats(struct net_device *dev)
                get_stat64(&hw_stats->tx_octets);
 
        stats->rx_errors = old_stats->rx_errors +
-               get_stat64(&hw_stats->rx_errors) +
-               get_stat64(&hw_stats->rx_discards);
+               get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
@@ -6046,6 +7595,9 @@ static struct net_device_stats *tg3_get_stats(struct net_device *dev)
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);
 
+       stats->rx_missed_errors = old_stats->rx_missed_errors +
+               get_stat64(&hw_stats->rx_discards);
+
        return stats;
 }
 
@@ -6151,9 +7703,12 @@ static void tg3_set_rx_mode(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       spin_lock_irq(&tp->lock);
+       if (!netif_running(dev))
+               return;
+
+       tg3_full_lock(tp, 0);
        __tg3_set_rx_mode(dev);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 }
 
 #define TG3_REGDUMP_LEN                (32 * 1024)
@@ -6175,8 +7730,10 @@ static void tg3_get_regs(struct net_device *dev,
 
        memset(p, 0, TG3_REGDUMP_LEN);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       if (tp->link_config.phy_is_low_power)
+               return;
+
+       tg3_full_lock(tp, 0);
 
 #define __GET_REG32(reg)       (*(p)++ = tr32(reg))
 #define GET_REG32_LOOP(base,len)               \
@@ -6212,8 +7769,13 @@ do {     p = (u32 *)(orig_p + (reg));            \
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
-       GET_REG32_LOOP(RX_CPU_BASE, 0x280);
-       GET_REG32_LOOP(TX_CPU_BASE, 0x280);
+       GET_REG32_1(RX_CPU_MODE);
+       GET_REG32_1(RX_CPU_STATE);
+       GET_REG32_1(RX_CPU_PGMCTR);
+       GET_REG32_1(RX_CPU_HWBKPT);
+       GET_REG32_1(TX_CPU_MODE);
+       GET_REG32_1(TX_CPU_STATE);
+       GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
@@ -6226,32 +7788,34 @@ do {    p = (u32 *)(orig_p + (reg));            \
 #undef GET_REG32_LOOP
 #undef GET_REG32_1
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 }
 
 static int tg3_get_eeprom_len(struct net_device *dev)
 {
-       return EEPROM_CHIP_SIZE;
+       struct tg3 *tp = netdev_priv(dev);
+
+       return tp->nvram_size;
 }
 
-static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
-                                                u32 offset, u32 *val);
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
+static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
+
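+/* Read NVRAM through ethtool.  NVRAM is accessed in 4-byte words, so the
+ * request is split into an unaligned head, whole words, and an unaligned
+ * tail; each word is converted to little-endian before being copied out.
+ */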
 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
 {
-       struct tg3 *tp = dev->priv;
+       struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, val, b_offset, b_count;
 
+       if (tp->link_config.phy_is_low_power)
+               return -EAGAIN;
+
        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;
 
-       ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
-       if (ret)
-               return ret;
-       eeprom->magic = swab32(eeprom->magic);
+       eeprom->magic = TG3_EEPROM_MAGIC;
 
        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
@@ -6261,9 +7825,10 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
-               ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
+               ret = tg3_nvram_read(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
+               val = cpu_to_le32(val);
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
@@ -6273,12 +7838,13 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
-               ret = tg3_nvram_read_using_eeprom(tp, offset + i, 
-                               (u32*)(pd + i));
+               ret = tg3_nvram_read(tp, offset + i, &val);
                if (ret) {
                        eeprom->len += i;
                        return ret;
                }
+               val = cpu_to_le32(val);
+               memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;
 
@@ -6287,22 +7853,80 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
-               ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
+               ret = tg3_nvram_read(tp, b_offset, &val);
                if (ret)
                        return ret;
+               val = cpu_to_le32(val);
                memcpy(pd, ((char*)&val), b_count);
                eeprom->len += b_count;
        }
        return 0;
 }
 
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int ret;
+       u32 offset, len, b_offset, odd_len, start, end;
+       u8 *buf;
+
+       if (tp->link_config.phy_is_low_power)
+               return -EAGAIN;
+
+       if (eeprom->magic != TG3_EEPROM_MAGIC)
+               return -EINVAL;
+
+       offset = eeprom->offset;
+       len = eeprom->len;
+
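+       /* NVRAM writes must be whole 32-bit words.  If the request starts or
+        * ends off a word boundary, read the bordering words now so the
+        * partial words can be merged into the bounce buffer built below.
+        */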
+       if ((b_offset = (offset & 3))) {
+               /* adjustments to start on required 4 byte boundary */
+               ret = tg3_nvram_read(tp, offset-b_offset, &start);
+               if (ret)
+                       return ret;
+               start = cpu_to_le32(start);
+               len += b_offset;
+               offset &= ~3;
+               if (len < 4)
+                       len = 4;
+       }
+
+       odd_len = 0;
+       if (len & 3) {
+               /* adjustments to end on required 4 byte boundary */
+               odd_len = 1;
+               len = (len + 3) & ~3;
+               ret = tg3_nvram_read(tp, offset+len-4, &end);
+               if (ret)
+                       return ret;
+               end = cpu_to_le32(end);
+       }
+
+       buf = data;
+       if (b_offset || odd_len) {
+               buf = kmalloc(len, GFP_KERNEL);
+               if (buf == NULL)
+                       return -ENOMEM;
+               if (b_offset)
+                       memcpy(buf, &start, 4);
+               if (odd_len)
+                       memcpy(buf+len-4, &end, 4);
+               memcpy(buf + b_offset, data, eeprom->len);
+       }
+
+       ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+       if (buf != data)
+               kfree(buf);
+
+       return ret;
+}
+
 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
-       if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
-                                       tp->link_config.phy_is_low_power)
-               return -EAGAIN;
 
        cmd->supported = (SUPPORTED_Autoneg);
 
@@ -6310,19 +7934,23 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);
 
-       if (tp->phy_id != PHY_ID_SERDES)
+       if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_MII);
-       else
+               cmd->port = PORT_TP;
+       } else {
                cmd->supported |= SUPPORTED_FIBRE;
-  
+               cmd->port = PORT_FIBRE;
+       }
+
        cmd->advertising = tp->link_config.advertising;
-       cmd->speed = tp->link_config.active_speed;
-       cmd->duplex = tp->link_config.active_duplex;
-       cmd->port = 0;
+       if (netif_running(dev)) {
+               cmd->speed = tp->link_config.active_speed;
+               cmd->duplex = tp->link_config.active_duplex;
+       }
        cmd->phy_address = PHY_ADDR;
        cmd->transceiver = 0;
        cmd->autoneg = tp->link_config.autoneg;
@@ -6330,16 +7958,12 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        cmd->maxrxpkt = 0;
        return 0;
 }
-  
+
 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
-       if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
-           tp->link_config.phy_is_low_power)
-               return -EAGAIN;
 
-       if (tp->phy_id == PHY_ID_SERDES) {
+       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
                /* These are the only valid advertisement bits allowed.  */
                if (cmd->autoneg == AUTONEG_ENABLE &&
                    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
@@ -6347,10 +7971,19 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                          ADVERTISED_Autoneg |
                                          ADVERTISED_FIBRE)))
                        return -EINVAL;
-       }
+               /* Fiber can only do SPEED_1000.  */
+               else if ((cmd->autoneg != AUTONEG_ENABLE) &&
+                        (cmd->speed != SPEED_1000))
+                       return -EINVAL;
+       /* Copper cannot force SPEED_1000.  */
+       } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
+                  (cmd->speed == SPEED_1000))
+               return -EINVAL;
+       else if ((cmd->speed == SPEED_1000) &&
+                (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+               return -EINVAL;
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -6362,67 +7995,73 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                tp->link_config.speed = cmd->speed;
                tp->link_config.duplex = cmd->duplex;
        }
-  
-       tg3_setup_phy(tp, 1);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
-  
+
+       tp->link_config.orig_speed = tp->link_config.speed;
+       tp->link_config.orig_duplex = tp->link_config.duplex;
+       tp->link_config.orig_autoneg = tp->link_config.autoneg;
+
+       if (netif_running(dev))
+               tg3_setup_phy(tp, 1);
+
+       tg3_full_unlock(tp);
+
        return 0;
 }
-  
+
 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
+       strcpy(info->fw_version, tp->fw_ver);
        strcpy(info->bus_info, pci_name(tp->pdev));
 }
-  
+
 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        wol->supported = WAKE_MAGIC;
        wol->wolopts = 0;
        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
                wol->wolopts = WAKE_MAGIC;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
-  
+
 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        if ((wol->wolopts & WAKE_MAGIC) &&
-           tp->phy_id == PHY_ID_SERDES &&
+           tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
            !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
                return -EINVAL;
-  
-       spin_lock_irq(&tp->lock);
+
+       spin_lock_bh(&tp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
        else
                tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-       spin_unlock_irq(&tp->lock);
-  
+       spin_unlock_bh(&tp->lock);
+
        return 0;
 }
-  
+
 static u32 tg3_get_msglevel(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
 }
-  
+
 static void tg3_set_msglevel(struct net_device *dev, u32 value)
 {
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
 }
-  
+
 #if TG3_TSO_SUPPORT != 0
 static int tg3_set_tso(struct net_device *dev, u32 value)
 {
@@ -6433,56 +8072,87 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
                        return -EINVAL;
                return 0;
        }
+       if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
+           (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
+               if (value)
+                       dev->features |= NETIF_F_TSO6;
+               else
+                       dev->features &= ~NETIF_F_TSO6;
+       }
        return ethtool_op_set_tso(dev, value);
 }
 #endif
-  
+
 static int tg3_nway_reset(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        u32 bmcr;
        int r;
-  
-       spin_lock_irq(&tp->lock);
-       tg3_readphy(tp, MII_BMCR, &bmcr);
-       tg3_readphy(tp, MII_BMCR, &bmcr);
+
+       if (!netif_running(dev))
+               return -EAGAIN;
+
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+               return -EINVAL;
+
+       spin_lock_bh(&tp->lock);
        r = -EINVAL;
-       if (bmcr & BMCR_ANENABLE) {
-               tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
+       tg3_readphy(tp, MII_BMCR, &bmcr);
+       if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+           ((bmcr & BMCR_ANENABLE) ||
+            (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
+               tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+                                          BMCR_ANENABLE);
                r = 0;
        }
-       spin_unlock_irq(&tp->lock);
-  
+       spin_unlock_bh(&tp->lock);
+
        return r;
 }
-  
+
 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
        ering->rx_mini_max_pending = 0;
-       ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
+               ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+       else
+               ering->rx_jumbo_max_pending = 0;
+
+       ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
 
        ering->rx_pending = tp->rx_pending;
        ering->rx_mini_pending = 0;
-       ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
+               ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+       else
+               ering->rx_jumbo_pending = 0;
+
        ering->tx_pending = tp->tx_pending;
 }
-  
+
 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+       int irq_sync = 0, err = 0;
+
        if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
            (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
-           (ering->tx_pending > TG3_TX_RING_SIZE - 1))
+           (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
+           (ering->tx_pending <= MAX_SKB_FRAGS) ||
+           ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
+            (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;
-  
-       tg3_netif_stop(tp);
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
-  
+
+       if (netif_running(dev)) {
+               tg3_netif_stop(tp);
+               irq_sync = 1;
+       }
+
+       tg3_full_lock(tp, irq_sync);
+
        tp->rx_pending = ering->rx_pending;
 
        if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
@@ -6491,93 +8161,105 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
        tp->tx_pending = ering->tx_pending;
 
-       tg3_halt(tp);
-       tg3_init_hw(tp);
-       netif_wake_queue(tp->dev);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
-       tg3_netif_start(tp);
-  
-       return 0;
+       if (netif_running(dev)) {
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               err = tg3_restart_hw(tp, 1);
+               if (!err)
+                       tg3_netif_start(tp);
+       }
+
+       tg3_full_unlock(tp);
+
+       return err;
 }
-  
+
 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
-       epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
-       epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
+       epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
+       epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
 }
-  
+
 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
-       tg3_netif_stop(tp);
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       int irq_sync = 0, err = 0;
+
+       if (netif_running(dev)) {
+               tg3_netif_stop(tp);
+               irq_sync = 1;
+       }
+
+       tg3_full_lock(tp, irq_sync);
+
        if (epause->autoneg)
                tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        else
                tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
        if (epause->rx_pause)
-               tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
+               tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
        else
-               tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
+               tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
        if (epause->tx_pause)
-               tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
+               tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
        else
-               tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
-       tg3_halt(tp);
-       tg3_init_hw(tp);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
-       tg3_netif_start(tp);
-  
-       return 0;
+               tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
+
+       if (netif_running(dev)) {
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               err = tg3_restart_hw(tp, 1);
+               if (!err)
+                       tg3_netif_start(tp);
+       }
+
+       tg3_full_unlock(tp);
+
+       return err;
 }
-  
+
 static u32 tg3_get_rx_csum(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
 }
-  
+
 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
                if (data != 0)
                        return -EINVAL;
                return 0;
        }
-  
-       spin_lock_irq(&tp->lock);
+
+       spin_lock_bh(&tp->lock);
        if (data)
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
-       spin_unlock_irq(&tp->lock);
-  
+       spin_unlock_bh(&tp->lock);
+
        return 0;
 }
-  
+
 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
 {
        struct tg3 *tp = netdev_priv(dev);
-  
+
        if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
                if (data != 0)
                        return -EINVAL;
                return 0;
        }
-  
-       if (data)
-               dev->features |= NETIF_F_IP_CSUM;
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+               ethtool_op_set_tx_hw_csum(dev, data);
        else
-               dev->features &= ~NETIF_F_IP_CSUM;
+               ethtool_op_set_tx_csum(dev, data);
 
        return 0;
 }
@@ -6587,25 +8269,772 @@ static int tg3_get_stats_count (struct net_device *dev)
        return TG3_NUM_STATS;
 }
 
+static int tg3_get_test_count (struct net_device *dev)
+{
+       return TG3_NUM_TEST;
+}
+
 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
+       case ETH_SS_TEST:
+               memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+               break;
        default:
                WARN_ON(1);     /* we need a WARN() */
                break;
        }
 }
 
+static int tg3_phys_id(struct net_device *dev, u32 data)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int i;
+
+       if (!netif_running(tp->dev))
+               return -EAGAIN;
+
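+       /* 'data' is the requested blink time in seconds; 0 selects the
+        * 2 second default.  Each loop iteration below toggles the LEDs and
+        * sleeps 500ms, so data * 2 half-periods give data seconds of blinking.
+        */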
+       if (data == 0)
+               data = 2;
+
+       for (i = 0; i < (data * 2); i++) {
+               if ((i % 2) == 0)
+                       tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+                                          LED_CTRL_1000MBPS_ON |
+                                          LED_CTRL_100MBPS_ON |
+                                          LED_CTRL_10MBPS_ON |
+                                          LED_CTRL_TRAFFIC_OVERRIDE |
+                                          LED_CTRL_TRAFFIC_BLINK |
+                                          LED_CTRL_TRAFFIC_LED);
+
+               else
+                       tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+                                          LED_CTRL_TRAFFIC_OVERRIDE);
+
+               if (msleep_interruptible(500))
+                       break;
+       }
+       tw32(MAC_LED_CTRL, tp->led_ctrl);
+       return 0;
+}
+
 static void tg3_get_ethtool_stats (struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
 {
-       struct tg3 *tp = dev->priv;
+       struct tg3 *tp = netdev_priv(dev);
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
 }
 
+#define NVRAM_TEST_SIZE 0x100
+#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
+#define NVRAM_SELFBOOT_HW_SIZE 0x20
+#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+       u32 *buf, csum, magic;
+       int i, j, err = 0, size;
+
+       if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
+               return -EIO;
+
+       if (magic == TG3_EEPROM_MAGIC)
+               size = NVRAM_TEST_SIZE;
+       else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
+               if ((magic & 0xe00000) == 0x200000)
+                       size = NVRAM_SELFBOOT_FORMAT1_SIZE;
+               else
+                       return 0;
+       } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+               size = NVRAM_SELFBOOT_HW_SIZE;
+       else
+               return -EIO;
+
+       buf = kmalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       err = -EIO;
+       for (i = 0, j = 0; i < size; i += 4, j++) {
+               u32 val;
+
+               if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+                       break;
+               buf[j] = cpu_to_le32(val);
+       }
+       if (i < size)
+               goto out;
+
+       /* Selfboot format */
+       if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
+           TG3_EEPROM_MAGIC_FW) {
+               u8 *buf8 = (u8 *) buf, csum8 = 0;
+
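+               /* Legacy selfboot images carry a simple additive checksum:
+                * the byte sum over the whole image must be zero.
+                */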
+               for (i = 0; i < size; i++)
+                       csum8 += buf8[i];
+
+               if (csum8 == 0) {
+                       err = 0;
+                       goto out;
+               }
+
+               err = -EIO;
+               goto out;
+       }
+
+       if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
+           TG3_EEPROM_MAGIC_HW) {
+               u8 data[NVRAM_SELFBOOT_DATA_SIZE];
+               u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
+               u8 *buf8 = (u8 *) buf;
+               int j, k;
+
+               /* Separate the parity bits and the data bytes.  */
+               for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
+                       if ((i == 0) || (i == 8)) {
+                               int l;
+                               u8 msk;
+
+                               for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
+                                       parity[k++] = buf8[i] & msk;
+                               i++;
+                       }
+                       else if (i == 16) {
+                               int l;
+                               u8 msk;
+
+                               for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
+                                       parity[k++] = buf8[i] & msk;
+                               i++;
+
+                               for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
+                                       parity[k++] = buf8[i] & msk;
+                               i++;
+                       }
+                       data[j++] = buf8[i];
+               }
+
+               err = -EIO;
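+               /* Each data byte together with its parity bit must contain an
+                * odd number of set bits; anything else means corruption.
+                */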
+               for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
+                       u8 hw8 = hweight8(data[i]);
+
+                       if ((hw8 & 0x1) && parity[i])
+                               goto out;
+                       else if (!(hw8 & 0x1) && !parity[i])
+                               goto out;
+               }
+               err = 0;
+               goto out;
+       }
+
+       /* Bootstrap checksum at offset 0x10 */
+       csum = calc_crc((unsigned char *) buf, 0x10);
+       if (csum != cpu_to_le32(buf[0x10/4]))
+               goto out;
+
+       /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+       csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+       if (csum != cpu_to_le32(buf[0xfc/4]))
+               goto out;
+
+       err = 0;
+
+out:
+       kfree(buf);
+       return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC 2
+#define TG3_COPPER_TIMEOUT_SEC 6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+       int i, max;
+
+       if (!netif_running(tp->dev))
+               return -ENODEV;
+
+       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+               max = TG3_SERDES_TIMEOUT_SEC;
+       else
+               max = TG3_COPPER_TIMEOUT_SEC;
+
+       for (i = 0; i < max; i++) {
+               if (netif_carrier_ok(tp->dev))
+                       return 0;
+
+               if (msleep_interruptible(1000))
+                       break;
+       }
+
+       return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+       int i, is_5705, is_5750;
+       u32 offset, read_mask, write_mask, val, save_val, read_val;
+       static struct {
+               u16 offset;
+               u16 flags;
+#define TG3_FL_5705    0x1
+#define TG3_FL_NOT_5705        0x2
+#define TG3_FL_NOT_5788        0x4
+#define TG3_FL_NOT_5750        0x8
+               u32 read_mask;
+               u32 write_mask;
+       } reg_tbl[] = {
+               /* MAC Control Registers */
+               { MAC_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x00ef6f8c },
+               { MAC_MODE, TG3_FL_5705,
+                       0x00000000, 0x01ef6b8c },
+               { MAC_STATUS, TG3_FL_NOT_5705,
+                       0x03800107, 0x00000000 },
+               { MAC_STATUS, TG3_FL_5705,
+                       0x03800100, 0x00000000 },
+               { MAC_ADDR_0_HIGH, 0x0000,
+                       0x00000000, 0x0000ffff },
+               { MAC_ADDR_0_LOW, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_RX_MTU_SIZE, 0x0000,
+                       0x00000000, 0x0000ffff },
+               { MAC_TX_MODE, 0x0000,
+                       0x00000000, 0x00000070 },
+               { MAC_TX_LENGTHS, 0x0000,
+                       0x00000000, 0x00003fff },
+               { MAC_RX_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x000007fc },
+               { MAC_RX_MODE, TG3_FL_5705,
+                       0x00000000, 0x000007dc },
+               { MAC_HASH_REG_0, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_HASH_REG_1, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_HASH_REG_2, 0x0000,
+                       0x00000000, 0xffffffff },
+               { MAC_HASH_REG_3, 0x0000,
+                       0x00000000, 0xffffffff },
+
+               /* Receive Data and Receive BD Initiator Control Registers. */
+               { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+                       0x00000000, 0x00000003 },
+               { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_STD_BD+0, 0x0000,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_STD_BD+4, 0x0000,
+                       0x00000000, 0xffffffff },
+               { RCVDBDI_STD_BD+8, 0x0000,
+                       0x00000000, 0xffff0002 },
+               { RCVDBDI_STD_BD+0xc, 0x0000,
+                       0x00000000, 0xffffffff },
+
+               /* Receive BD Initiator Control Registers. */
+               { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { RCVBDI_STD_THRESH, TG3_FL_5705,
+                       0x00000000, 0x000003ff },
+               { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+
+               /* Host Coalescing Control Registers. */
+               { HOSTCC_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x00000004 },
+               { HOSTCC_MODE, TG3_FL_5705,
+                       0x00000000, 0x000000f6 },
+               { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+                       0x00000000, 0x000003ff },
+               { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+                       0x00000000, 0x000003ff },
+               { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+                       0x00000000, 0x000000ff },
+               { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+                       0x00000000, 0xffffffff },
+               { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+                       0xffffffff, 0x00000000 },
+               { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+                       0xffffffff, 0x00000000 },
+
+               /* Buffer Manager Control Registers. */
+               { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
+                       0x00000000, 0x007fff80 },
+               { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
+                       0x00000000, 0x007fffff },
+               { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+                       0x00000000, 0x0000003f },
+               { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+                       0x00000000, 0x000001ff },
+               { BUFMGR_MB_HIGH_WATER, 0x0000,
+                       0x00000000, 0x000001ff },
+               { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+                       0xffffffff, 0x00000000 },
+               { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+                       0xffffffff, 0x00000000 },
+
+               /* Mailbox Registers */
+               { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+                       0x00000000, 0x000001ff },
+               { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+                       0x00000000, 0x000001ff },
+               { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+                       0x00000000, 0x000007ff },
+               { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+                       0x00000000, 0x000001ff },
+
+               { 0xffff, 0x0000, 0x00000000, 0x00000000 },
+       };
+
+       is_5705 = is_5750 = 0;
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               is_5705 = 1;
+               if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+                       is_5750 = 1;
+       }
+
+       for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+               if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+                       continue;
+
+               if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+                       continue;
+
+               if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+                   (reg_tbl[i].flags & TG3_FL_NOT_5788))
+                       continue;
+
+               if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+                       continue;
+
+               offset = (u32) reg_tbl[i].offset;
+               read_mask = reg_tbl[i].read_mask;
+               write_mask = reg_tbl[i].write_mask;
+
+               /* Save the original register content */
+               save_val = tr32(offset);
+
+               /* Determine the read-only value. */
+               read_val = save_val & read_mask;
+
+               /* Write zero to the register, then make sure the read-only bits
+                * are not changed and the read/write bits are all zeros.
+                */
+               tw32(offset, 0);
+
+               val = tr32(offset);
+
+               /* Test the read-only and read/write bits. */
+               if (((val & read_mask) != read_val) || (val & write_mask))
+                       goto out;
+
+               /* Write ones to all the bits defined by RdMask and WrMask, then
+                * make sure the read-only bits are not changed and the
+                * read/write bits are all ones.
+                */
+               tw32(offset, read_mask | write_mask);
+
+               val = tr32(offset);
+
+               /* Test the read-only bits. */
+               if ((val & read_mask) != read_val)
+                       goto out;
+
+               /* Test the read/write bits. */
+               if ((val & write_mask) != write_mask)
+                       goto out;
+
+               tw32(offset, save_val);
+       }
+
+       return 0;
+
+out:
+       if (netif_msg_hw(tp))
+               printk(KERN_ERR PFX "Register test failed at offset %x\n",
+                      offset);
+       tw32(offset, save_val);
+       return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+       static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+       int i;
+       u32 j;
+
+       for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+               for (j = 0; j < len; j += 4) {
+                       u32 val;
+
+                       tg3_write_mem(tp, offset + j, test_pattern[i]);
+                       tg3_read_mem(tp, offset + j, &val);
+                       if (val != test_pattern[i])
+                               return -EIO;
+               }
+       }
+       return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
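+       /* Per-ASIC tables of internal memory regions (offset, length) to
+        * exercise with the patterns in tg3_do_mem_test(); each list is
+        * terminated by an offset of 0xffffffff.
+        */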
+       static struct mem_entry {
+               u32 offset;
+               u32 len;
+       } mem_tbl_570x[] = {
+               { 0x00000000, 0x00b50},
+               { 0x00002000, 0x1c000},
+               { 0xffffffff, 0x00000}
+       }, mem_tbl_5705[] = {
+               { 0x00000100, 0x0000c},
+               { 0x00000200, 0x00008},
+               { 0x00004000, 0x00800},
+               { 0x00006000, 0x01000},
+               { 0x00008000, 0x02000},
+               { 0x00010000, 0x0e000},
+               { 0xffffffff, 0x00000}
+       }, mem_tbl_5755[] = {
+               { 0x00000200, 0x00008},
+               { 0x00004000, 0x00800},
+               { 0x00006000, 0x00800},
+               { 0x00008000, 0x02000},
+               { 0x00010000, 0x0c000},
+               { 0xffffffff, 0x00000}
+       }, mem_tbl_5906[] = {
+               { 0x00000200, 0x00008},
+               { 0x00004000, 0x00400},
+               { 0x00006000, 0x00400},
+               { 0x00008000, 0x01000},
+               { 0x00010000, 0x01000},
+               { 0xffffffff, 0x00000}
+       };
+       struct mem_entry *mem_tbl;
+       int err = 0;
+       int i;
+
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+                       mem_tbl = mem_tbl_5755;
+               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+                       mem_tbl = mem_tbl_5906;
+               else
+                       mem_tbl = mem_tbl_5705;
+       } else
+               mem_tbl = mem_tbl_570x;
+
+       for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+               if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
+                   mem_tbl[i].len)) != 0)
+                       break;
+       }
+
+       return err;
+}
+
+#define TG3_MAC_LOOPBACK       0
+#define TG3_PHY_LOOPBACK       1
+
+static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
+{
+       u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
+       u32 desc_idx;
+       struct sk_buff *skb, *rx_skb;
+       u8 *tx_data;
+       dma_addr_t map;
+       int num_pkts, tx_len, rx_len, i, err;
+       struct tg3_rx_buffer_desc *desc;
+
+       if (loopback_mode == TG3_MAC_LOOPBACK) {
+               /* HW errata - mac loopback fails in some cases on 5780.
+                * Normal traffic and PHY loopback are not affected by
+                * errata.
+                */
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+                       return 0;
+
+               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+                          MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
+               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+                       mac_mode |= MAC_MODE_PORT_MODE_MII;
+               else
+                       mac_mode |= MAC_MODE_PORT_MODE_GMII;
+               tw32(MAC_MODE, mac_mode);
+       } else if (loopback_mode == TG3_PHY_LOOPBACK) {
+               u32 val;
+
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                       u32 phytest;
+
+                       if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
+                               u32 phy;
+
+                               tg3_writephy(tp, MII_TG3_EPHY_TEST,
+                                            phytest | MII_TG3_EPHY_SHADOW_EN);
+                               if (!tg3_readphy(tp, 0x1b, &phy))
+                                       tg3_writephy(tp, 0x1b, phy & ~0x20);
+                               if (!tg3_readphy(tp, 0x10, &phy))
+                                       tg3_writephy(tp, 0x10, phy & ~0x4000);
+                               tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
+                       }
+                       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
+               } else
+                       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+
+               tg3_writephy(tp, MII_BMCR, val);
+               udelay(40);
+
+               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+                          MAC_MODE_LINK_POLARITY;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                       tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
+                       mac_mode |= MAC_MODE_PORT_MODE_MII;
+               } else
+                       mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+               /* reset to prevent losing 1st rx packet intermittently */
+               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+                       tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+                       udelay(10);
+                       tw32_f(MAC_RX_MODE, tp->rx_mode);
+               }
+               if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+                       mac_mode &= ~MAC_MODE_LINK_POLARITY;
+                       tg3_writephy(tp, MII_TG3_EXT_CTRL,
+                                    MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+               }
+               tw32(MAC_MODE, mac_mode);
+       }
+       else
+               return -EINVAL;
+
+       err = -EIO;
+
+       tx_len = 1514;
+       skb = netdev_alloc_skb(tp->dev, tx_len);
+       if (!skb)
+               return -ENOMEM;
+
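+       /* Build the test frame: destination MAC is our own address, the
+        * source/type bytes are zeroed, and the payload is filled with an
+        * incrementing pattern that is verified on the receive side.
+        */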
+       tx_data = skb_put(skb, tx_len);
+       memcpy(tx_data, tp->dev->dev_addr, 6);
+       memset(tx_data + 6, 0x0, 8);
+
+       tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+
+       for (i = 14; i < tx_len; i++)
+               tx_data[i] = (u8) (i & 0xff);
+
+       map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+
+       tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+            HOSTCC_MODE_NOW);
+
+       udelay(10);
+
+       rx_start_idx = tp->hw_status->idx[0].rx_producer;
+
+       num_pkts = 0;
+
+       tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
+
+       tp->tx_prod++;
+       num_pkts++;
+
+       tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
+                    tp->tx_prod);
+       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+
+       udelay(10);
+
+       /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
+       for (i = 0; i < 25; i++) {
+               tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+                      HOSTCC_MODE_NOW);
+
+               udelay(10);
+
+               tx_idx = tp->hw_status->idx[0].tx_consumer;
+               rx_idx = tp->hw_status->idx[0].rx_producer;
+               if ((tx_idx == tp->tx_prod) &&
+                   (rx_idx == (rx_start_idx + num_pkts)))
+                       break;
+       }
+
+       pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+       dev_kfree_skb(skb);
+
+       if (tx_idx != tp->tx_prod)
+               goto out;
+
+       if (rx_idx != rx_start_idx + num_pkts)
+               goto out;
+
+       desc = &tp->rx_rcb[rx_start_idx];
+       desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+       opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+       if (opaque_key != RXD_OPAQUE_RING_STD)
+               goto out;
+
+       if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+           (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+               goto out;
+
+       rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
+       if (rx_len != tx_len)
+               goto out;
+
+       rx_skb = tp->rx_std_buffers[desc_idx].skb;
+
+       map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+       pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+
+       for (i = 14; i < tx_len; i++) {
+               if (*(rx_skb->data + i) != (u8) (i & 0xff))
+                       goto out;
+       }
+       err = 0;
+
+       /* tg3_free_rings will unmap and free the rx_skb */
+out:
+       return err;
+}
+
+#define TG3_MAC_LOOPBACK_FAILED                1
+#define TG3_PHY_LOOPBACK_FAILED                2
+#define TG3_LOOPBACK_FAILED            (TG3_MAC_LOOPBACK_FAILED |      \
+                                        TG3_PHY_LOOPBACK_FAILED)
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+       int err = 0;
+
+       if (!netif_running(tp->dev))
+               return TG3_LOOPBACK_FAILED;
+
+       err = tg3_reset_hw(tp, 1);
+       if (err)
+               return TG3_LOOPBACK_FAILED;
+
+       if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
+               err |= TG3_MAC_LOOPBACK_FAILED;
+       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+               if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
+                       err |= TG3_PHY_LOOPBACK_FAILED;
+       }
+
+       return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+                         u64 *data)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (tp->link_config.phy_is_low_power)
+               tg3_set_power_state(tp, PCI_D0);
+
+       memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
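+       /* Result slots: 0 nvram, 1 link, 2 registers, 3 memory,
+        * 4 loopback, 5 interrupt; non-zero means the test failed.
+        */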
+
+       if (tg3_test_nvram(tp) != 0) {
+               etest->flags |= ETH_TEST_FL_FAILED;
+               data[0] = 1;
+       }
+       if (tg3_test_link(tp) != 0) {
+               etest->flags |= ETH_TEST_FL_FAILED;
+               data[1] = 1;
+       }
+       if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               int err, irq_sync = 0;
+
+               if (netif_running(dev)) {
+                       tg3_netif_stop(tp);
+                       irq_sync = 1;
+               }
+
+               tg3_full_lock(tp, irq_sync);
+
+               tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+               err = tg3_nvram_lock(tp);
+               tg3_halt_cpu(tp, RX_CPU_BASE);
+               if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+                       tg3_halt_cpu(tp, TX_CPU_BASE);
+               if (!err)
+                       tg3_nvram_unlock(tp);
+
+               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+                       tg3_phy_reset(tp);
+
+               if (tg3_test_registers(tp) != 0) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       data[2] = 1;
+               }
+               if (tg3_test_memory(tp) != 0) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       data[3] = 1;
+               }
+               if ((data[4] = tg3_test_loopback(tp)) != 0)
+                       etest->flags |= ETH_TEST_FL_FAILED;
+
+               tg3_full_unlock(tp);
+
+               if (tg3_test_interrupt(tp) != 0) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       data[5] = 1;
+               }
+
+               tg3_full_lock(tp, 0);
+
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               if (netif_running(dev)) {
+                       tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+                       if (!tg3_restart_hw(tp, 1))
+                               tg3_netif_start(tp);
+               }
+
+               tg3_full_unlock(tp);
+       }
+       if (tp->link_config.phy_is_low_power)
+               tg3_set_power_state(tp, PCI_D3hot);
+
+}
+
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct mii_ioctl_data *data = if_mii(ifr);
@@ -6620,12 +9049,15 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG: {
                u32 mii_regval;
 
-               if (tp->phy_id == PHY_ID_SERDES)
+               if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               spin_lock_irq(&tp->lock);
+               if (tp->link_config.phy_is_low_power)
+                       return -EAGAIN;
+
+               spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
-               spin_unlock_irq(&tp->lock);
+               spin_unlock_bh(&tp->lock);
 
                data->val_out = mii_regval;
 
@@ -6633,15 +9065,18 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 
        case SIOCSMIIREG:
-               if (tp->phy_id == PHY_ID_SERDES)
+               if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */
 
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
 
-               spin_lock_irq(&tp->lock);
+               if (tp->link_config.phy_is_low_power)
+                       return -EAGAIN;
+
+               spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
-               spin_unlock_irq(&tp->lock);
+               spin_unlock_bh(&tp->lock);
 
                return err;
 
@@ -6657,32 +9092,102 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       if (netif_running(dev))
+               tg3_netif_stop(tp);
+
+       tg3_full_lock(tp, 0);
 
        tp->vlgrp = grp;
 
        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
+
+       if (netif_running(dev))
+               tg3_netif_start(tp);
 }
 
 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       if (netif_running(dev))
+               tg3_netif_stop(tp);
+
+       tg3_full_lock(tp, 0);
        if (tp->vlgrp)
                tp->vlgrp->vlan_devices[vid] = NULL;
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
+
+       if (netif_running(dev))
+               tg3_netif_start(tp);
 }
 #endif
 
-static struct ethtool_ops tg3_ethtool_ops = {
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       memcpy(ec, &tp->coal, sizeof(*ec));
+       return 0;
+}
+
+static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
+       u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
+
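+       /* The interrupt-time coalescing and statistics-tick parameters are
+        * only accepted on pre-5705 chips; on 5705 and newer the limits stay
+        * zero so the range checks below reject any non-zero request.
+        */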
+       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+               max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
+               max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
+               max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
+               min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
+       }
+
+       if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+           (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+           (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
+           (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
+           (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
+           (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
+           (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
+           (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
+           (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
+           (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
+               return -EINVAL;
+
+       /* No rx interrupts will be generated if both are zero */
+       if ((ec->rx_coalesce_usecs == 0) &&
+           (ec->rx_max_coalesced_frames == 0))
+               return -EINVAL;
+
+       /* No tx interrupts will be generated if both are zero */
+       if ((ec->tx_coalesce_usecs == 0) &&
+           (ec->tx_max_coalesced_frames == 0))
+               return -EINVAL;
+
+       /* Only copy relevant parameters, ignore all others. */
+       tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
+       tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
+       tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+       tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+       tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+       tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+       tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+       tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+       tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
+
+       if (netif_running(dev)) {
+               tg3_full_lock(tp, 0);
+               __tg3_set_coalesce(tp, &tp->coal);
+               tg3_full_unlock(tp);
+       }
+       return 0;
+}
+
+static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
@@ -6696,6 +9201,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
+       .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
@@ -6710,154 +9216,688 @@ static struct ethtool_ops tg3_ethtool_ops = {
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = tg3_set_tso,
 #endif
+       .self_test_count        = tg3_get_test_count,
+       .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
+       .phys_id                = tg3_phys_id,
        .get_stats_count        = tg3_get_stats_count,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
+       .get_coalesce           = tg3_get_coalesce,
+       .set_coalesce           = tg3_set_coalesce,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
-/* Chips other than 5700/5701 use the NVRAM for fetching info. */
-static void __devinit tg3_nvram_init(struct tg3 *tp)
+static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
 {
-       int j;
+       u32 cursize, val, magic;
 
-       if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
+       tp->nvram_size = EEPROM_CHIP_SIZE;
+
+       if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return;
 
+       if ((magic != TG3_EEPROM_MAGIC) &&
+           ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
+           ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
+               return;
+
+       /*
+        * Size the chip by reading offsets at increasing powers of two.
+        * When we encounter our validation signature, we know the addressing
+        * has wrapped around, and thus have our chip size.
+        */
+       cursize = 0x10;
+
+       while (cursize < tp->nvram_size) {
+               if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
+                       return;
+
+               if (val == magic)
+                       break;
+
+               cursize <<= 1;
+       }
+
+       tp->nvram_size = cursize;
+}
+
+static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+{
+       u32 val;
+
+       if (tg3_nvram_read_swab(tp, 0, &val) != 0)
+               return;
+
+       /* Selfboot format */
+       if (val != TG3_EEPROM_MAGIC) {
+               tg3_get_eeprom_size(tp);
+               return;
+       }
+
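+       /* The word at offset 0xf0 advertises the NVRAM size in KB in its
+        * upper 16 bits; fall back to 128KB when it is zero or unreadable.
+        */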
+       if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+               if (val != 0) {
+                       tp->nvram_size = (val >> 16) * 1024;
+                       return;
+               }
+       }
+       tp->nvram_size = 0x20000;
+}
+
+static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+{
+       u32 nvcfg1;
+
+       nvcfg1 = tr32(NVRAM_CFG1);
+       if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+               tp->tg3_flags2 |= TG3_FLG2_FLASH;
+       }
+       else {
+               nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+               tw32(NVRAM_CFG1, nvcfg1);
+       }
+
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
+           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+               switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+                       case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+                               tp->nvram_jedecnum = JEDEC_ATMEL;
+                               tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+                               tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                               break;
+                       case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+                               tp->nvram_jedecnum = JEDEC_ATMEL;
+                               tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+                               break;
+                       case FLASH_VENDOR_ATMEL_EEPROM:
+                               tp->nvram_jedecnum = JEDEC_ATMEL;
+                               tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+                               tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                               break;
+                       case FLASH_VENDOR_ST:
+                               tp->nvram_jedecnum = JEDEC_ST;
+                               tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+                               tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                               break;
+                       case FLASH_VENDOR_SAIFUN:
+                               tp->nvram_jedecnum = JEDEC_SAIFUN;
+                               tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+                               break;
+                       case FLASH_VENDOR_SST_SMALL:
+                       case FLASH_VENDOR_SST_LARGE:
+                               tp->nvram_jedecnum = JEDEC_SST;
+                               tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+                               break;
+               }
+       }
+       else {
+               tp->nvram_jedecnum = JEDEC_ATMEL;
+               tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+               tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+       }
+}
+
+static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
+{
+       u32 nvcfg1;
+
+       nvcfg1 = tr32(NVRAM_CFG1);
+
+       /* NVRAM protection for TPM */
+       if (nvcfg1 & (1 << 27))
+               tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+
+       switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+               case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
+               case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
+                       tp->nvram_jedecnum = JEDEC_ATMEL;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       break;
+               case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+                       tp->nvram_jedecnum = JEDEC_ATMEL;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
+                       break;
+               case FLASH_5752VENDOR_ST_M45PE10:
+               case FLASH_5752VENDOR_ST_M45PE20:
+               case FLASH_5752VENDOR_ST_M45PE40:
+                       tp->nvram_jedecnum = JEDEC_ST;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
+                       break;
+       }
+
+       if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
+               switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
+                       case FLASH_5752PAGE_SIZE_256:
+                               tp->nvram_pagesize = 256;
+                               break;
+                       case FLASH_5752PAGE_SIZE_512:
+                               tp->nvram_pagesize = 512;
+                               break;
+                       case FLASH_5752PAGE_SIZE_1K:
+                               tp->nvram_pagesize = 1024;
+                               break;
+                       case FLASH_5752PAGE_SIZE_2K:
+                               tp->nvram_pagesize = 2048;
+                               break;
+                       case FLASH_5752PAGE_SIZE_4K:
+                               tp->nvram_pagesize = 4096;
+                               break;
+                       case FLASH_5752PAGE_SIZE_264:
+                               tp->nvram_pagesize = 264;
+                               break;
+               }
+       }
+       else {
+               /* For eeprom, set pagesize to maximum eeprom size */
+               tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+               nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+               tw32(NVRAM_CFG1, nvcfg1);
+       }
+}
+
+static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
+{
+       u32 nvcfg1;
+
+       nvcfg1 = tr32(NVRAM_CFG1);
+
+       /* NVRAM protection for TPM */
+       if (nvcfg1 & (1 << 27))
+               tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+
+       switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+               case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
+               case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
+                       tp->nvram_jedecnum = JEDEC_ATMEL;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+                       nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+                       tw32(NVRAM_CFG1, nvcfg1);
+                       break;
+               case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+               case FLASH_5755VENDOR_ATMEL_FLASH_1:
+               case FLASH_5755VENDOR_ATMEL_FLASH_2:
+               case FLASH_5755VENDOR_ATMEL_FLASH_3:
+               case FLASH_5755VENDOR_ATMEL_FLASH_4:
+                       tp->nvram_jedecnum = JEDEC_ATMEL;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
+                       tp->nvram_pagesize = 264;
+                       break;
+               case FLASH_5752VENDOR_ST_M45PE10:
+               case FLASH_5752VENDOR_ST_M45PE20:
+               case FLASH_5752VENDOR_ST_M45PE40:
+                       tp->nvram_jedecnum = JEDEC_ST;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
+                       tp->nvram_pagesize = 256;
+                       break;
+       }
+}
+
+static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
+{
+       u32 nvcfg1;
+
+       nvcfg1 = tr32(NVRAM_CFG1);
+
+       switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+               case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+               case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+               case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+               case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+                       tp->nvram_jedecnum = JEDEC_ATMEL;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+                       nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+                       tw32(NVRAM_CFG1, nvcfg1);
+                       break;
+               case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+               case FLASH_5755VENDOR_ATMEL_FLASH_1:
+               case FLASH_5755VENDOR_ATMEL_FLASH_2:
+               case FLASH_5755VENDOR_ATMEL_FLASH_3:
+                       tp->nvram_jedecnum = JEDEC_ATMEL;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
+                       tp->nvram_pagesize = 264;
+                       break;
+               case FLASH_5752VENDOR_ST_M45PE10:
+               case FLASH_5752VENDOR_ST_M45PE20:
+               case FLASH_5752VENDOR_ST_M45PE40:
+                       tp->nvram_jedecnum = JEDEC_ST;
+                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
+                       tp->nvram_pagesize = 256;
+                       break;
+       }
+}
+
+static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
+{
+       tp->nvram_jedecnum = JEDEC_ATMEL;
+       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+       tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+}
+
+/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+static void __devinit tg3_nvram_init(struct tg3 *tp)
+{
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));
 
-       /* XXX schedule_timeout() ... */
-       for (j = 0; j < 100; j++)
-               udelay(10);
+       msleep(1);
 
        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
-               u32 nvcfg1;
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+               tp->tg3_flags |= TG3_FLAG_NVRAM;
+
+               if (tg3_nvram_lock(tp)) {
+                       printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
+                              "tg3_nvram_init failed.\n", tp->dev->name);
+                       return;
+               }
+               tg3_enable_nvram_access(tp);
+
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+                       tg3_get_5752_nvram_info(tp);
+               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+                       tg3_get_5755_nvram_info(tp);
+               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+                       tg3_get_5787_nvram_info(tp);
+               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+                       tg3_get_5906_nvram_info(tp);
+               else
+                       tg3_get_nvram_info(tp);
+
+               tg3_get_nvram_size(tp);
+
+               tg3_disable_nvram_access(tp);
+               tg3_nvram_unlock(tp);
+
+       } else {
+               tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
+
+               tg3_get_eeprom_size(tp);
+       }
+}
+
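+/* Read a 32-bit word through the legacy serial-EEPROM interface in the
+ * GRC block.  This path is used whenever TG3_FLAG_NVRAM is not set,
+ * i.e. on 5700/5701 class chips that have no NVRAM controller.
+ */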
+static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
+                                       u32 offset, u32 *val)
+{
+       u32 tmp;
+       int i;
+
+       if (offset > EEPROM_ADDR_ADDR_MASK ||
+           (offset % 4) != 0)
+               return -EINVAL;
+
+       tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
+                                       EEPROM_ADDR_DEVID_MASK |
+                                       EEPROM_ADDR_READ);
+       tw32(GRC_EEPROM_ADDR,
+            tmp |
+            (0 << EEPROM_ADDR_DEVID_SHIFT) |
+            ((offset << EEPROM_ADDR_ADDR_SHIFT) &
+             EEPROM_ADDR_ADDR_MASK) |
+            EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+       for (i = 0; i < 1000; i++) {
+               tmp = tr32(GRC_EEPROM_ADDR);
+
+               if (tmp & EEPROM_ADDR_COMPLETE)
+                       break;
+               msleep(1);
+       }
+       if (!(tmp & EEPROM_ADDR_COMPLETE))
+               return -EBUSY;
+
+       *val = tr32(GRC_EEPROM_DATA);
+       return 0;
+}
+
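+/* Each poll iteration below delays 10 usec, so this timeout allows
+ * roughly 100 ms for an NVRAM command to signal completion.
+ */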
+#define NVRAM_CMD_TIMEOUT 10000
+
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+       int i;
+
+       tw32(NVRAM_CMD, nvram_cmd);
+       for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+               udelay(10);
+               if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+                       udelay(10);
+                       break;
+               }
+       }
+       if (i == NVRAM_CMD_TIMEOUT) {
+               return -EBUSY;
+       }
+       return 0;
+}
+
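+/* Map a linear NVRAM offset to the physical address used by buffered
+ * Atmel AT45DB0X1B flash, which addresses by page number (shifted into
+ * the upper bits) plus a byte offset within the page.
+ */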
+static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
+{
+       if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
+           (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+           (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+           (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+               addr = ((addr / tp->nvram_pagesize) <<
+                       ATMEL_AT45DB0X1B_PAGE_POS) +
+                      (addr % tp->nvram_pagesize);
+
+       return addr;
+}
+
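+/* Inverse of tg3_nvram_phys_addr(): convert a page/offset style address
+ * back into a linear NVRAM offset for buffered Atmel flash parts.
+ */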
+static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
+{
+       if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
+           (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+           (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+           (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+               addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
+                       tp->nvram_pagesize) +
+                      (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
+
+       return addr;
+}
+
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+       int ret;
+
+       if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
+               return tg3_nvram_read_using_eeprom(tp, offset, val);
+
+       offset = tg3_nvram_phys_addr(tp, offset);
+
+       if (offset > NVRAM_ADDR_MSK)
+               return -EINVAL;
+
+       ret = tg3_nvram_lock(tp);
+       if (ret)
+               return ret;
+
+       tg3_enable_nvram_access(tp);
+
+       tw32(NVRAM_ADDR, offset);
+       ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+               NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+
+       if (ret == 0)
+               *val = swab32(tr32(NVRAM_RDDATA));
+
+       tg3_disable_nvram_access(tp);
+
+       tg3_nvram_unlock(tp);
+
+       return ret;
+}
+
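+/* Read a word from NVRAM and return it byte-swapped relative to
+ * tg3_nvram_read(); used for fields such as the EEPROM signature and
+ * the firmware version header.
+ */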
+static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
+{
+       int err;
+       u32 tmp;
+
+       err = tg3_nvram_read(tp, offset, &tmp);
+       *val = swab32(tmp);
+       return err;
+}
+
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+                                   u32 offset, u32 len, u8 *buf)
+{
+       int i, j, rc = 0;
+       u32 val;
+
+       for (i = 0; i < len; i += 4) {
+               u32 addr, data;
+
+               addr = offset + i;
+
+               memcpy(&data, buf + i, 4);
+
+               tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
+
+               val = tr32(GRC_EEPROM_ADDR);
+               tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+               val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+                       EEPROM_ADDR_READ);
+               tw32(GRC_EEPROM_ADDR, val |
+                       (0 << EEPROM_ADDR_DEVID_SHIFT) |
+                       (addr & EEPROM_ADDR_ADDR_MASK) |
+                       EEPROM_ADDR_START |
+                       EEPROM_ADDR_WRITE);
+
+               for (j = 0; j < 1000; j++) {
+                       val = tr32(GRC_EEPROM_ADDR);
+
+                       if (val & EEPROM_ADDR_COMPLETE)
+                               break;
+                       msleep(1);
+               }
+               if (!(val & EEPROM_ADDR_COMPLETE)) {
+                       rc = -EBUSY;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
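+/* Unbuffered flash must be rewritten a full page at a time: read the
+ * page into a temporary buffer, merge in the new data, erase the page,
+ * then program the merged contents back one word at a time.
+ */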
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+               u8 *buf)
+{
+       int ret = 0;
+       u32 pagesize = tp->nvram_pagesize;
+       u32 pagemask = pagesize - 1;
+       u32 nvram_cmd;
+       u8 *tmp;
+
+       tmp = kmalloc(pagesize, GFP_KERNEL);
+       if (tmp == NULL)
+               return -ENOMEM;
+
+       while (len) {
+               int j;
+               u32 phy_addr, page_off, size;
+
+               phy_addr = offset & ~pagemask;
+
+               for (j = 0; j < pagesize; j += 4) {
+                       if ((ret = tg3_nvram_read(tp, phy_addr + j,
+                                               (u32 *) (tmp + j))))
+                               break;
+               }
+               if (ret)
+                       break;
+
+               page_off = offset & pagemask;
+               size = pagesize;
+               if (len < size)
+                       size = len;
+
+               len -= size;
+
+               memcpy(tmp + page_off, buf, size);
+
+               offset = offset + (pagesize - page_off);
+
+               tg3_enable_nvram_access(tp);
+
+               /*
+                * Before we can erase the flash page, we need
+                * to issue a special "write enable" command.
+                */
+               nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+               if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+                       break;
+
+               /* Erase the target page */
+               tw32(NVRAM_ADDR, phy_addr);
+
+               nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+                       NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+               if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+                       break;
+
+               /* Issue another write enable to start the write. */
+               nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+               if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+                       break;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
-                       u32 nvaccess = tr32(NVRAM_ACCESS);
+               for (j = 0; j < pagesize; j += 4) {
+                       u32 data;
 
-                       tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
-               }
+                       data = *((u32 *) (tmp + j));
+                       tw32(NVRAM_WRDATA, cpu_to_be32(data));
 
-               nvcfg1 = tr32(NVRAM_CFG1);
+                       tw32(NVRAM_ADDR, phy_addr + j);
 
-               tp->tg3_flags |= TG3_FLAG_NVRAM;
-               if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
-                       if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
-                               tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
-               } else {
-                       nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
-                       tw32(NVRAM_CFG1, nvcfg1);
-               }
+                       nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+                               NVRAM_CMD_WR;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
-                       u32 nvaccess = tr32(NVRAM_ACCESS);
+                       if (j == 0)
+                               nvram_cmd |= NVRAM_CMD_FIRST;
+                       else if (j == (pagesize - 4))
+                               nvram_cmd |= NVRAM_CMD_LAST;
 
-                       tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+                       if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+                               break;
                }
-       } else {
-               tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
+               if (ret)
+                       break;
        }
+
+       nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+       tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+       kfree(tmp);
+
+       return ret;
 }
 
-static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
-                                                u32 offset, u32 *val)
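+/* Buffered flash and EEPROM parts accept word-at-a-time writes; the FIRST
+ * and LAST command bits bracket each page, and ST parts on chips other
+ * than the 5752/5755/5787 additionally need a write-enable command at the
+ * start of each page.
+ */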
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+               u8 *buf)
 {
-       u32 tmp;
-       int i;
+       int i, ret = 0;
 
-       if (offset > EEPROM_ADDR_ADDR_MASK ||
-           (offset % 4) != 0)
-               return -EINVAL;
+       for (i = 0; i < len; i += 4, offset += 4) {
+               u32 data, page_off, phy_addr, nvram_cmd;
 
-       tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
-                                       EEPROM_ADDR_DEVID_MASK |
-                                       EEPROM_ADDR_READ);
-       tw32(GRC_EEPROM_ADDR,
-            tmp |
-            (0 << EEPROM_ADDR_DEVID_SHIFT) |
-            ((offset << EEPROM_ADDR_ADDR_SHIFT) &
-             EEPROM_ADDR_ADDR_MASK) |
-            EEPROM_ADDR_READ | EEPROM_ADDR_START);
+               memcpy(&data, buf + i, 4);
+               tw32(NVRAM_WRDATA, cpu_to_be32(data));
 
-       for (i = 0; i < 10000; i++) {
-               tmp = tr32(GRC_EEPROM_ADDR);
+               page_off = offset % tp->nvram_pagesize;
 
-               if (tmp & EEPROM_ADDR_COMPLETE)
+               phy_addr = tg3_nvram_phys_addr(tp, offset);
+
+               tw32(NVRAM_ADDR, phy_addr);
+
+               nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+               if ((page_off == 0) || (i == 0))
+                       nvram_cmd |= NVRAM_CMD_FIRST;
+               if (page_off == (tp->nvram_pagesize - 4))
+                       nvram_cmd |= NVRAM_CMD_LAST;
+
+               if (i == (len - 4))
+                       nvram_cmd |= NVRAM_CMD_LAST;
+
+               if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
+                   (tp->nvram_jedecnum == JEDEC_ST) &&
+                   (nvram_cmd & NVRAM_CMD_FIRST)) {
+
+                       if ((ret = tg3_nvram_exec_cmd(tp,
+                               NVRAM_CMD_WREN | NVRAM_CMD_GO |
+                               NVRAM_CMD_DONE)))
+
+                               break;
+               }
+               if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
+                       /* We always do complete word writes to eeprom. */
+                       nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+               }
+
+               if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
-               udelay(100);
        }
-       if (!(tmp & EEPROM_ADDR_COMPLETE))
-               return -EBUSY;
-
-       *val = tr32(GRC_EEPROM_DATA);
-       return 0;
+       return ret;
 }
 
-static int __devinit tg3_nvram_read(struct tg3 *tp,
-                                   u32 offset, u32 *val)
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 {
-       int i;
+       int ret;
 
-       if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
-               printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
-               return -EINVAL;
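+       /* On boards that use GPIO1 as an EEPROM write-protect line, drive
+        * it low for the duration of the write; it is restored from
+        * tp->grc_local_ctrl once the write completes.
+        */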
+       if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+               tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+                      ~GRC_LCLCTRL_GPIO_OUTPUT1);
+               udelay(40);
        }
 
-       if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
-               return tg3_nvram_read_using_eeprom(tp, offset, val);
-
-       if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
-               offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
-                         NVRAM_BUFFERED_PAGE_POS) +
-                       (offset % NVRAM_BUFFERED_PAGE_SIZE);
-
-       if (offset > NVRAM_ADDR_MSK)
-               return -EINVAL;
+       if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
+               ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+       }
+       else {
+               u32 grc_mode;
 
-       tg3_nvram_lock(tp);
+               ret = tg3_nvram_lock(tp);
+               if (ret)
+                       return ret;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
-               u32 nvaccess = tr32(NVRAM_ACCESS);
+               tg3_enable_nvram_access(tp);
+               if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
+                   !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
+                       tw32(NVRAM_WRITE1, 0x406);
 
-               tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
-       }
+               grc_mode = tr32(GRC_MODE);
+               tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
 
-       tw32(NVRAM_ADDR, offset);
-       tw32(NVRAM_CMD,
-            NVRAM_CMD_RD | NVRAM_CMD_GO |
-            NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+               if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
+                       !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
 
-       /* Wait for done bit to clear. */
-       for (i = 0; i < 1000; i++) {
-               udelay(10);
-               if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
-                       udelay(10);
-                       *val = swab32(tr32(NVRAM_RDDATA));
-                       break;
+                       ret = tg3_nvram_write_block_buffered(tp, offset, len,
+                               buf);
+               }
+               else {
+                       ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+                               buf);
                }
-       }
-
-       tg3_nvram_unlock(tp);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
-               u32 nvaccess = tr32(NVRAM_ACCESS);
+               grc_mode = tr32(GRC_MODE);
+               tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
 
-               tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+               tg3_disable_nvram_access(tp);
+               tg3_nvram_unlock(tp);
        }
 
-       if (i >= 1000)
-               return -EBUSY;
+       if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+               tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+               udelay(40);
+       }
 
-       return 0;
+       return ret;
 }
 
 struct subsys_tbl_ent {
@@ -6870,10 +9910,10 @@ static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
-       { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
+       { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
-       { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
+       { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
@@ -6882,7 +9922,7 @@ static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
-       { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
+       { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
 
@@ -6895,65 +9935,113 @@ static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
-       { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
+       { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
 
        /* IBM boards. */
-       { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
+       { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
 };
 
-static int __devinit tg3_phy_probe(struct tg3 *tp)
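+/* Find the static subsystem-ID table entry for this board; used as a
+ * fallback when the PHY ID cannot be read from hardware or NVRAM.
+ */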
+static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
 {
-       u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
-       u32 hw_phy_id, hw_phy_id_masked;
-       u32 val;
-       int i, eeprom_signature_found, err;
+       int i;
 
-       tp->phy_id = PHY_ID_INVALID;
        for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
                if ((subsys_id_to_phy_id[i].subsys_vendor ==
                     tp->pdev->subsystem_vendor) &&
                    (subsys_id_to_phy_id[i].subsys_devid ==
-                    tp->pdev->subsystem_device)) {
-                       tp->phy_id = subsys_id_to_phy_id[i].phy_id;
-                       break;
+                    tp->pdev->subsystem_device))
+                       return &subsys_id_to_phy_id[i];
+       }
+       return NULL;
+}
+
+static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+{
+       u32 val;
+       u16 pmcsr;
+
+       /* On some early chips the SRAM cannot be accessed in D3hot state,
+        * so we need to make sure we're in D0.
+        */
+       pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
+       pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+       pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
+       msleep(1);
+
+       /* Make sure register accesses (indirect or otherwise)
+        * will function correctly.
+        */
+       pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                              tp->misc_host_ctrl);
+
+       /* The memory arbiter has to be enabled in order for SRAM accesses
+        * to succeed.  Normally on powerup the tg3 chip firmware will make
+        * sure it is enabled, but other entities such as system netboot
+        * code might disable it.
+        */
+       val = tr32(MEMARB_MODE);
+       tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+       tp->phy_id = PHY_ID_INVALID;
+       tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+       /* Assume an onboard device by default.  */
+       tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+
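+       /* The 5906 reports LOM vs. NIC designs via the PCIe transaction
+        * config register, so classify it here and skip the SRAM-based
+        * configuration below.
+        */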
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
+                       tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+                       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
                }
+               return;
        }
 
-       eeprom_phy_id = PHY_ID_INVALID;
-       eeprom_signature_found = 0;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
+               u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
+               int eeprom_phy_serdes = 0;
 
                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;
 
-               eeprom_signature_found = 1;
+               tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
+               ver >>= NIC_SRAM_DATA_VER_SHIFT;
+               if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
+                   (ver > 0) && (ver < 0x100))
+                       tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
 
                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
-                   NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
-                       eeprom_phy_id = PHY_ID_SERDES;
-               } else {
-                       u32 nic_phy_id;
+                   NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
+                       eeprom_phy_serdes = 1;
 
-                       tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
-                       if (nic_phy_id != 0) {
-                               u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
-                               u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
+               tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
+               if (nic_phy_id != 0) {
+                       u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
+                       u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
 
-                               eeprom_phy_id  = (id1 >> 16) << 10;
-                               eeprom_phy_id |= (id2 & 0xfc00) << 16;
-                               eeprom_phy_id |= (id2 & 0x03ff) <<  0;
-                       }
+                       eeprom_phy_id  = (id1 >> 16) << 10;
+                       eeprom_phy_id |= (id2 & 0xfc00) << 16;
+                       eeprom_phy_id |= (id2 & 0x03ff) <<  0;
+               } else
+                       eeprom_phy_id = 0;
+
+               tp->phy_id = eeprom_phy_id;
+               if (eeprom_phy_serdes) {
+                       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+                               tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
+                       else
+                               tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
-                       tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &led_cfg);
-                       led_cfg &= (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
+               if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+                       led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
-               else
+               else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
 
                switch (led_cfg) {
@@ -6968,6 +10056,16 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 
                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+                       /* Default to PHY_1_MODE if 0 (MAC_MODE) is
+                        * read from some older 5700/5701 bootcode.
+                        */
+                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                           ASIC_REV_5700 ||
+                           GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                           ASIC_REV_5701)
+                               tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
                        break;
 
                case SHASTA_EXT_LED_SHARED:
@@ -6996,20 +10094,41 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
 
-               if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
-                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
-                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
-                   (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
+               if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+                       if ((tp->pdev->subsystem_vendor ==
+                            PCI_VENDOR_ID_ARIMA) &&
+                           (tp->pdev->subsystem_device == 0x205a ||
+                            tp->pdev->subsystem_device == 0x2063))
+                               tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+               } else {
+                       tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+                       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+               }
 
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+                       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
                if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
                        tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
+
+               if (cfg2 & (1 << 17))
+                       tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
+
+               /* SerDes signal pre-emphasis in register 0x590 is set by the
+                * bootcode if bit 18 is set.
+                */
+               if (cfg2 & (1 << 18))
+                       tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
        }
+}
+
+static int __devinit tg3_phy_probe(struct tg3 *tp)
+{
+       u32 hw_phy_id_1, hw_phy_id_2;
+       u32 hw_phy_id, hw_phy_id_masked;
+       int err;
 
        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
@@ -7035,29 +10154,41 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 
        if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
+               if (hw_phy_id_masked == PHY_ID_BCM8002)
+                       tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+               else
+                       tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
        } else {
-               /* phy_id currently holds the value found in the
-                * subsys_id_to_phy_id[] table or PHY_ID_INVALID
-                * if a match was not found there.
-                */
-               if (tp->phy_id == PHY_ID_INVALID) {
-                       if (!eeprom_signature_found ||
-                           !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
+               if (tp->phy_id != PHY_ID_INVALID) {
+                       /* Do nothing, phy ID already set up in
+                        * tg3_get_eeprom_hw_cfg().
+                        */
+               } else {
+                       struct subsys_tbl_ent *p;
+
+                       /* No eeprom signature?  Try the hardcoded
+                        * subsys device table.
+                        */
+                       p = lookup_by_subsys(tp);
+                       if (!p)
                                return -ENODEV;
-                       tp->phy_id = eeprom_phy_id;
+
+                       tp->phy_id = p->phy_id;
+                       if (!tp->phy_id ||
+                           tp->phy_id == PHY_ID_BCM8002)
+                               tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }
        }
 
-       if (tp->phy_id != PHY_ID_SERDES &&
+       if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
-               u32 bmsr, adv_reg, tg3_ctrl;
+               u32 bmsr, adv_reg, tg3_ctrl, mask;
 
                tg3_readphy(tp, MII_BMSR, &bmsr);
-               tg3_readphy(tp, MII_BMSR, &bmsr);
-
-               if (bmsr & BMSR_LSTATUS)
+               if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+                   (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;
-                   
+
                err = tg3_phy_reset(tp);
                if (err)
                        return err;
@@ -7075,7 +10206,10 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
                }
 
-               if (!tg3_copper_is_advertising_all(tp)) {
+               mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
+               if (!tg3_copper_is_advertising_all(tp, mask)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);
 
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
@@ -7102,10 +10236,7 @@ skip_phy_reset:
                err = tg3_init_5401phy_dsp(tp);
        }
 
-       if (!eeprom_signature_found)
-               tp->led_ctrl = LED_CTRL_MODE_PHY_1;
-
-       if (tp->phy_id == PHY_ID_SERDES)
+       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                tp->link_config.advertising =
                        (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
@@ -7122,32 +10253,55 @@ skip_phy_reset:
 static void __devinit tg3_read_partno(struct tg3 *tp)
 {
        unsigned char vpd_data[256];
-       int i;
+       unsigned int i;
+       u32 magic;
 
-       if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
-               /* Sun decided not to put the necessary bits in the
-                * NVRAM of their onboard tg3 parts :(
-                */
-               strcpy(tp->board_part_number, "Sun 5704");
-               return;
-       }
+       if (tg3_nvram_read_swab(tp, 0x0, &magic))
+               goto out_not_found;
 
-       for (i = 0; i < 256; i += 4) {
-               u32 tmp;
+       if (magic == TG3_EEPROM_MAGIC) {
+               for (i = 0; i < 256; i += 4) {
+                       u32 tmp;
 
-               if (tg3_nvram_read(tp, 0x100 + i, &tmp))
-                       goto out_not_found;
+                       if (tg3_nvram_read(tp, 0x100 + i, &tmp))
+                               goto out_not_found;
+
+                       vpd_data[i + 0] = ((tmp >>  0) & 0xff);
+                       vpd_data[i + 1] = ((tmp >>  8) & 0xff);
+                       vpd_data[i + 2] = ((tmp >> 16) & 0xff);
+                       vpd_data[i + 3] = ((tmp >> 24) & 0xff);
+               }
+       } else {
+               int vpd_cap;
+
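+               /* No tg3 NVRAM signature: fall back to reading the VPD
+                * image through the standard PCI VPD capability.
+                */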
+               vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
+               for (i = 0; i < 256; i += 4) {
+                       u32 tmp, j = 0;
+                       u16 tmp16;
+
+                       pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
+                                             i);
+                       while (j++ < 100) {
+                               pci_read_config_word(tp->pdev, vpd_cap +
+                                                    PCI_VPD_ADDR, &tmp16);
+                               if (tmp16 & 0x8000)
+                                       break;
+                               msleep(1);
+                       }
+                       if (!(tmp16 & 0x8000))
+                               goto out_not_found;
 
-               vpd_data[i + 0] = ((tmp >>  0) & 0xff);
-               vpd_data[i + 1] = ((tmp >>  8) & 0xff);
-               vpd_data[i + 2] = ((tmp >> 16) & 0xff);
-               vpd_data[i + 3] = ((tmp >> 24) & 0xff);
+                       pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
+                                             &tmp);
+                       tmp = cpu_to_le32(tmp);
+                       memcpy(&vpd_data[i], &tmp, 4);
+               }
        }
 
        /* Now parse and find the part number. */
-       for (i = 0; i < 256; ) {
+       for (i = 0; i < 254; ) {
                unsigned char val = vpd_data[i];
-               int block_end;
+               unsigned int block_end;
 
                if (val == 0x82 || val == 0x91) {
                        i = (i + 3 +
@@ -7163,21 +10317,26 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                i += 3;
-               while (i < block_end) {
+
+               if (block_end > 256)
+                       goto out_not_found;
+
+               while (i < (block_end - 2)) {
                        if (vpd_data[i + 0] == 'P' &&
                            vpd_data[i + 1] == 'N') {
                                int partno_len = vpd_data[i + 2];
 
-                               if (partno_len > 24)
+                               i += 3;
+                               if (partno_len > 24 || (partno_len + i) > 256)
                                        goto out_not_found;
 
                                memcpy(tp->board_part_number,
-                                      &vpd_data[i + 3],
-                                      partno_len);
+                                      &vpd_data[i], partno_len);
 
                                /* Success. */
                                return;
                        }
+                       i += 3 + vpd_data[i + 2];
                }
 
                /* Part number not found. */
@@ -7185,68 +10344,69 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
        }
 
 out_not_found:
-       strcpy(tp->board_part_number, "none");
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+               strcpy(tp->board_part_number, "BCM95906");
+       else
+               strcpy(tp->board_part_number, "none");
 }
 
-#ifdef CONFIG_SPARC64
-static int __devinit tg3_is_sun_5704(struct tg3 *tp)
+static void __devinit tg3_read_fw_ver(struct tg3 *tp)
 {
-       struct pci_dev *pdev = tp->pdev;
-       struct pcidev_cookie *pcp = pdev->sysdata;
+       u32 val, offset, start;
 
-       if (pcp != NULL) {
-               int node = pcp->prom_node;
-               u32 venid, devid;
-               int err;
+       if (tg3_nvram_read_swab(tp, 0, &val))
+               return;
 
-               err = prom_getproperty(node, "subsystem-vendor-id",
-                                      (char *) &venid, sizeof(venid));
-               if (err == 0 || err == -1)
-                       return 0;
-               err = prom_getproperty(node, "subsystem-id",
-                                      (char *) &devid, sizeof(devid));
-               if (err == 0 || err == -1)
-                       return 0;
+       if (val != TG3_EEPROM_MAGIC)
+               return;
+
+       if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
+           tg3_nvram_read_swab(tp, 0x4, &start))
+               return;
+
+       offset = tg3_nvram_logical_addr(tp, offset);
+       if (tg3_nvram_read_swab(tp, offset, &val))
+               return;
+
+       if ((val & 0xfc000000) == 0x0c000000) {
+               u32 ver_offset, addr;
+               int i;
+
+               if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
+                   tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
+                       return;
+
+               if (val != 0)
+                       return;
+
+               addr = offset + ver_offset - start;
+               for (i = 0; i < 16; i += 4) {
+                       if (tg3_nvram_read(tp, addr + i, &val))
+                               return;
 
-               if (venid == PCI_VENDOR_ID_SUN &&
-                   devid == PCI_DEVICE_ID_TIGON3_5704)
-                       return 1;
+                       val = cpu_to_le32(val);
+                       memcpy(tp->fw_ver + i, &val, 4);
+               }
        }
-       return 0;
 }
-#endif
 
 static int __devinit tg3_get_invariants(struct tg3 *tp)
 {
+       static struct pci_device_id write_reorder_chipsets[] = {
+               { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+                            PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+               { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+                            PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+               { PCI_DEVICE(PCI_VENDOR_ID_VIA,
+                            PCI_DEVICE_ID_VIA_8385_0) },
+               { },
+       };
        u32 misc_ctrl_reg;
        u32 cacheline_sz_reg;
        u32 pci_state_reg, grc_misc_cfg;
        u32 val;
        u16 pci_cmd;
-       int err;
-
-#ifdef CONFIG_SPARC64
-       if (tg3_is_sun_5704(tp))
-               tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
-#endif
-
-       /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
-        * reordering to the mailbox registers done by the host
-        * controller can cause major troubles.  We read back from
-        * every mailbox register write to force the writes to be
-        * posted to the chip in order.
-        */
-       if (pci_find_device(PCI_VENDOR_ID_INTEL,
-                           PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
-           pci_find_device(PCI_VENDOR_ID_INTEL,
-                           PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
-           pci_find_device(PCI_VENDOR_ID_INTEL,
-                           PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
-           pci_find_device(PCI_VENDOR_ID_INTEL,
-                           PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
-           pci_find_device(PCI_VENDOR_ID_AMD,
-                           PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
-               tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+       int err, pcie_cap;
 
        /* Force memory write invalidate off.  If we leave it on,
         * then on 5700_BX chips we have to enable a workaround.
@@ -7271,6 +10431,106 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        tp->pci_chip_rev_id = (misc_ctrl_reg >>
                               MISC_HOST_CTRL_CHIPREV_SHIFT);
 
+       /* Wrong chip ID in 5752 A0. This code can be removed later
+        * as A0 is not in production.
+        */
+       if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+               tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+
+       /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
+        * we need to disable memory and use config. cycles
+        * only to access all registers. The 5702/03 chips
+        * can mistakenly decode the special cycles from the
+        * ICH chipsets as memory write cycles, causing corruption
+        * of register and memory space. Only certain ICH bridges
+        * will drive special cycles with non-zero data during the
+        * address phase which can fall within the 5703's address
+        * range. This is not an ICH bug as the PCI spec allows
+        * non-zero address during special cycles. However, only
+        * these ICH bridges are known to drive non-zero addresses
+        * during special cycles.
+        *
+        * Since special cycles do not cross PCI bridges, we only
+        * enable this workaround if the 5703 is on the secondary
+        * bus of these ICH bridges.
+        */
+       if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
+           (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+               static struct tg3_dev_id {
+                       u32     vendor;
+                       u32     device;
+                       u32     rev;
+               } ich_chipsets[] = {
+                       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
+                         PCI_ANY_ID },
+                       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
+                         PCI_ANY_ID },
+                       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
+                         0xa },
+                       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
+                         PCI_ANY_ID },
+                       { },
+               };
+               struct tg3_dev_id *pci_id = &ich_chipsets[0];
+               struct pci_dev *bridge = NULL;
+
+               while (pci_id->vendor != 0) {
+                       bridge = pci_get_device(pci_id->vendor, pci_id->device,
+                                               bridge);
+                       if (!bridge) {
+                               pci_id++;
+                               continue;
+                       }
+                       if (pci_id->rev != PCI_ANY_ID) {
+                               u8 rev;
+
+                               pci_read_config_byte(bridge, PCI_REVISION_ID,
+                                                    &rev);
+                               if (rev > pci_id->rev)
+                                       continue;
+                       }
+                       if (bridge->subordinate &&
+                           (bridge->subordinate->number ==
+                            tp->pdev->bus->number)) {
+
+                               tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
+                               pci_dev_put(bridge);
+                               break;
+                       }
+               }
+       }
+
+       /* The EPB bridge inside 5714, 5715, and 5780 cannot support
+        * DMA addresses > 40-bit. This bridge may have additional 57xx
+        * devices behind it in some 4-port NIC designs, for example.
+        * Any tg3 device found behind the bridge will also need the 40-bit
+        * DMA workaround.
+        */
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+               tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
+               tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
+               tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+       }
+       else {
+               struct pci_dev *bridge = NULL;
+
+               do {
+                       bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+                                               PCI_DEVICE_ID_SERVERWORKS_EPB,
+                                               bridge);
+                       if (bridge && bridge->subordinate &&
+                           (bridge->subordinate->number <=
+                            tp->pdev->bus->number) &&
+                           (bridge->subordinate->subordinate >=
+                            tp->pdev->bus->number)) {
+                               tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
+                               pci_dev_put(bridge);
+                               break;
+                       }
+               } while (bridge);
+       }
+
        /* Initialize misc host control in PCI block. */
        tp->misc_host_ctrl |= (misc_ctrl_reg &
                               MISC_HOST_CTRL_CHIPREV);
@@ -7285,8 +10545,65 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
        tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
 
-       if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+               tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
+
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
+           (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
+               tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
+
+       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                       tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
+                       tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
+               } else {
+                       tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
+                                         TG3_FLG2_HW_TSO_1_BUG;
+                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+                               ASIC_REV_5750 &&
+                           tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+                               tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
+               }
+       }
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+               tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
+
+       pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
+       if (pcie_cap != 0) {
                tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                       u16 lnkctl;
+
+                       pci_read_config_word(tp->pdev,
+                                            pcie_cap + PCI_EXP_LNKCTL,
+                                            &lnkctl);
+                       if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
+                               tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
+               }
+       }
+
+       /* If we have an AMD 762 or VIA K8T800 chipset, write
+        * reordering to the mailbox registers done by the host
+        * controller can cause major troubles.  We read back from
+        * every mailbox register write to force the writes to be
+        * posted to the chip in order.
+        */
+       if (pci_dev_present(write_reorder_chipsets) &&
+           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+               tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
            tp->pci_lat_timer < 64) {
@@ -7337,6 +10654,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                }
        }
 
+       /* 5700 BX chips need to have their TX producer index mailboxes
+        * written twice to work around a bug.
+        */
+       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
+               tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
+
        /* Back to back register writes can cause problems on this chip,
         * the workaround is to read back all reg writes except those to
         * mailbox regs.  See tg3_write_indirect_reg32().
@@ -7353,15 +10676,92 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
                tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
 
-       /* Chip-specific fixup from Broadcom driver */
-       if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
-           (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
-               pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
-               pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
-       }
+       /* Chip-specific fixup from Broadcom driver */
+       if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+           (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+               pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+               pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+       }
+
+       /* Default fast path register access methods */
+       tp->read32 = tg3_read32;
+       tp->write32 = tg3_write32;
+       tp->read32_mbox = tg3_read32;
+       tp->write32_mbox = tg3_write32;
+       tp->write32_tx_mbox = tg3_write32;
+       tp->write32_rx_mbox = tg3_write32;
+
+       /* Various workaround register access methods */
+       if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
+               tp->write32 = tg3_write_indirect_reg32;
+       else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
+               tp->write32 = tg3_write_flush_reg32;
+
+       if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
+           (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
+               tp->write32_tx_mbox = tg3_write32_tx_mbox;
+               if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+                       tp->write32_rx_mbox = tg3_write_flush_reg32;
+       }
+
+       if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
+               tp->read32 = tg3_read_indirect_reg32;
+               tp->write32 = tg3_write_indirect_reg32;
+               tp->read32_mbox = tg3_read_indirect_mbox;
+               tp->write32_mbox = tg3_write_indirect_mbox;
+               tp->write32_tx_mbox = tg3_write_indirect_mbox;
+               tp->write32_rx_mbox = tg3_write_indirect_mbox;
+
+               iounmap(tp->regs);
+               tp->regs = NULL;
+
+               pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+               pci_cmd &= ~PCI_COMMAND_MEMORY;
+               pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+       }
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               tp->read32_mbox = tg3_read32_mbox_5906;
+               tp->write32_mbox = tg3_write32_mbox_5906;
+               tp->write32_tx_mbox = tg3_write32_mbox_5906;
+               tp->write32_rx_mbox = tg3_write32_mbox_5906;
+       }
+
+       if (tp->write32 == tg3_write_indirect_reg32 ||
+           ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+               tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
+
+       /* Get eeprom hw config before calling tg3_set_power_state().
+        * In particular, the TG3_FLG2_IS_NIC flag must be
+        * determined before calling tg3_set_power_state() so that
+        * we know whether or not to switch out of Vaux power.
+        * When the flag is set, it means that GPIO1 is used for eeprom
+        * write protect and also implies that it is a LOM where GPIOs
+        * are not used to switch power.
+        */
+       tg3_get_eeprom_hw_cfg(tp);
+
+       /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
+        * GPIO1 driven high will bring 5700's external PHY out of reset.
+        * It is also used as eeprom write protect on LOMs.
+        */
+       tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
+           (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
+               tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+                                      GRC_LCLCTRL_GPIO_OUTPUT1);
+       /* Unused GPIO3 must be driven as output on 5752 because there
+        * are no pull-up resistors on unused GPIO pins.
+        */
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+               tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+               tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 
        /* Force the chip into D0. */
-       err = tg3_set_power_state(tp, 0);
+       err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
                       pci_name(tp->pdev));
@@ -7374,20 +10774,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
                tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
 
-       /* Pseudo-header checksum is done by hardware logic and not
-        * the offload processers, so make the chip do the pseudo-
-        * header checksums on receive.  For transmit it is more
-        * convenient to do the pseudo-header checksum in software
-        * as Linux does that on transmit for us in all cases.
-        */
-       tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
-       tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
-
        /* Derive initial jumbo mode from MTU assigned in
         * ether_setup() via the alloc_etherdev() call
         */
-       if (tp->dev->mtu > ETH_DATA_LEN)
-               tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
+       if (tp->dev->mtu > ETH_DATA_LEN &&
+           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+               tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
 
        /* Determine WakeOnLan speed to use. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -7403,7 +10795,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
            ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
-            (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
+            (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
+           (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
 
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
@@ -7412,19 +10806,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
                tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
-               tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
+                       tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
+                       if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
+                               tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
+               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+                       tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+       }
 
-       /* Only 5701 and later support tagged irq status mode.
-        * Also, 5788 chips cannot use tagged irq status.
-        *
-        * However, since we are using NAPI avoid tagged irq status
-        * because the interrupt condition is more difficult to
-        * fully clear in that mode.
-        */
        tp->coalesce_mode = 0;
-
        if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -7453,7 +10845,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                    chiprevid == CHIPREV_ID_5701_B0 ||
                    chiprevid == CHIPREV_ID_5701_B2 ||
                    chiprevid == CHIPREV_ID_5701_B5) {
-                       unsigned long sram_base;
+                       void __iomem *sram_base;
 
                        /* Write some dummy words into the SRAM status block
                         * area, see if it reads back correctly.  If the return
@@ -7472,37 +10864,34 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        udelay(50);
        tg3_nvram_init(tp);
 
-       /* Always use host TXDs, it performs better in particular
-        * with multi-frag packets.  The tests below are kept here
-        * as documentation should we change this decision again
-        * in the future.
-        */
-       tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
-
-#if 0
-       /* Determine if TX descriptors will reside in
-        * main memory or in the chip SRAM.
-        */
-       if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
-               tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
-#endif
-
        grc_misc_cfg = tr32(GRC_MISC_CFG);
        grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
 
+       /* Broadcom's driver says that CIOBE multisplit has a bug */
+#if 0
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
            grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
                tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
                tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
        }
-
+#endif
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+       if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+           (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+               tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
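+       /* In tagged status mode each status block update carries a tag
+        * that the driver echoes back when re-enabling interrupts, so
+        * the chip can tell whether an update was missed;
+        * MISC_HOST_CTRL_TAGGED_STATUS turns the mode on in hardware.
+        */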
+       if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+               tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+                                     HOSTCC_MODE_CLRTICK_TXBD);
+
+               tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+               pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                                      tp->misc_host_ctrl);
+       }
+
        /* these are limited to 10/100 only */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -7512,7 +10901,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
            (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
-            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F))
+            (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
+             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
+             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
 
        err = tg3_phy_probe(tp);
@@ -7523,8 +10915,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        }
 
        tg3_read_partno(tp);
+       tg3_read_fw_ver(tp);
 
-       if (tp->phy_id == PHY_ID_SERDES) {
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
@@ -7547,35 +10940,42 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         * upon subsystem IDs.
         */
        if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
-           tp->phy_id != PHY_ID_SERDES) {
+           !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
                                  TG3_FLAG_USE_LINKCHG_REG);
        }
 
        /* For all SERDES we poll the MAC status register. */
-       if (tp->phy_id == PHY_ID_SERDES)
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
        else
                tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 
-       /* 5700 BX chips need to have their TX producer index mailboxes
-        * written twice to workaround a bug.
-        */
-       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
-               tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
-       else
-               tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
-
-       /* It seems all chips can get confused if TX buffers
+       /* All chips before 5787 can get confused if TX buffers
         * straddle the 4GB address boundary in some cases.
         */
-       tp->dev->hard_start_xmit = tg3_start_xmit;
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+               tp->dev->hard_start_xmit = tg3_start_xmit;
+       else
+               tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
 
        tp->rx_offset = 2;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
                tp->rx_offset = 0;
 
+       tp->rx_std_max_post = TG3_RX_RING_SIZE;
+
+       /* Increment the rx prod index on the rx std ring by at most
+        * 8 for these chips to work around hw errata.
+        */
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+               tp->rx_std_max_post = 8;
+
        /* By default, disable wake-on-lan.  User can change this
         * using ETHTOOL_SWOL.
         */
@@ -7592,11 +10992,14 @@ static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
        struct pcidev_cookie *pcp = pdev->sysdata;
 
        if (pcp != NULL) {
-               int node = pcp->prom_node;
+               unsigned char *addr;
+               int len;
 
-               if (prom_getproplen(node, "local-mac-address") == 6) {
-                       prom_getproperty(node, "local-mac-address",
-                                        dev->dev_addr, 6);
+               addr = of_get_property(pcp->prom_node, "local-mac-address",
+                                       &len);
+               if (addr && len == 6) {
+                       memcpy(dev->dev_addr, addr, 6);
+                       memcpy(dev->perm_addr, dev->dev_addr, 6);
                        return 0;
                }
        }
@@ -7608,6 +11011,7 @@ static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
        struct net_device *dev = tp->dev;
 
        memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
        return 0;
 }
 #endif
@@ -7616,6 +11020,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 {
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
+       int addr_ok = 0;
 
 #ifdef CONFIG_SPARC64
        if (!tg3_get_macaddr_sparc(tp))
@@ -7623,8 +11028,8 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 #endif
 
        mac_offset = 0x7c;
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-           !(tp->tg3_flags & TG3_FLG2_SUN_5704)) {
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                if (tg3_nvram_lock(tp))
@@ -7632,6 +11037,8 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
                else
                        tg3_nvram_unlock(tp);
        }
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+               mac_offset = 0x10;
 
        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
@@ -7644,29 +11051,33 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;
-       }
-       /* Next, try NVRAM. */
-       else if (!(tp->tg3_flags & TG3_FLG2_SUN_5704) &&
-                !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
-                !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
-               dev->dev_addr[0] = ((hi >> 16) & 0xff);
-               dev->dev_addr[1] = ((hi >> 24) & 0xff);
-               dev->dev_addr[2] = ((lo >>  0) & 0xff);
-               dev->dev_addr[3] = ((lo >>  8) & 0xff);
-               dev->dev_addr[4] = ((lo >> 16) & 0xff);
-               dev->dev_addr[5] = ((lo >> 24) & 0xff);
-       }
-       /* Finally just fetch it out of the MAC control regs. */
-       else {
-               hi = tr32(MAC_ADDR_0_HIGH);
-               lo = tr32(MAC_ADDR_0_LOW);
 
-               dev->dev_addr[5] = lo & 0xff;
-               dev->dev_addr[4] = (lo >> 8) & 0xff;
-               dev->dev_addr[3] = (lo >> 16) & 0xff;
-               dev->dev_addr[2] = (lo >> 24) & 0xff;
-               dev->dev_addr[1] = hi & 0xff;
-               dev->dev_addr[0] = (hi >> 8) & 0xff;
+               /* Some old bootcode may report a 0 MAC address in SRAM */
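+               /* is_valid_ether_addr() rejects all-zero and multicast addrs */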
+               addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+       }
+       if (!addr_ok) {
+               /* Next, try NVRAM. */
+               if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+                   !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
+                       dev->dev_addr[0] = ((hi >> 16) & 0xff);
+                       dev->dev_addr[1] = ((hi >> 24) & 0xff);
+                       dev->dev_addr[2] = ((lo >>  0) & 0xff);
+                       dev->dev_addr[3] = ((lo >>  8) & 0xff);
+                       dev->dev_addr[4] = ((lo >> 16) & 0xff);
+                       dev->dev_addr[5] = ((lo >> 24) & 0xff);
+               }
+               /* Finally just fetch it out of the MAC control regs. */
+               else {
+                       hi = tr32(MAC_ADDR_0_HIGH);
+                       lo = tr32(MAC_ADDR_0_LOW);
+
+                       dev->dev_addr[5] = lo & 0xff;
+                       dev->dev_addr[4] = (lo >> 8) & 0xff;
+                       dev->dev_addr[3] = (lo >> 16) & 0xff;
+                       dev->dev_addr[2] = (lo >> 24) & 0xff;
+                       dev->dev_addr[1] = hi & 0xff;
+                       dev->dev_addr[0] = (hi >> 8) & 0xff;
+               }
        }
 
        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
@@ -7676,9 +11087,150 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 #endif
                return -EINVAL;
        }
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE      1
+#define BOUNDARY_MULTI_CACHELINE       2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+       int cacheline_size;
+       u8 byte;
+       int goal;
+
+       pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
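+       /* PCI_CACHE_LINE_SIZE is in 32-bit words: e.g. 0x10 means a
+        * 64-byte cache line.  Zero (not programmed) is treated as
+        * 1024 bytes here.
+        */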
+       if (byte == 0)
+               cacheline_size = 1024;
+       else
+               cacheline_size = (int) byte * 4;
+
+       /* On 5703 and later chips, the boundary bits have no
+        * effect.
+        */
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+               goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+       goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+       goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+       goal = 0;
+#endif
+#endif
+
+       if (!goal)
+               goto out;
+
+       /* PCI controllers on most RISC systems tend to disconnect
+        * when a device tries to burst across a cache-line boundary.
+        * Therefore, letting tg3 do so just wastes PCI bandwidth.
+        *
+        * Unfortunately, for PCI-E there are only limited
+        * write-side controls for this, and thus for reads
+        * we will still get the disconnects.  We'll also waste
+        * these PCI cycles for both reads and writes on chips
+        * other than the 5700 and 5701, which do not implement
+        * the boundary bits.
+        */
+       if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+               switch (cacheline_size) {
+               case 16:
+               case 32:
+               case 64:
+               case 128:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+                                       DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+                       } else {
+                               val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                                       DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                       }
+                       break;
+
+               case 256:
+                       val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+                               DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+                       break;
+
+               default:
+                       val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                               DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                       break;
+               }
+       } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+               switch (cacheline_size) {
+               case 16:
+               case 32:
+               case 64:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                               val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+                               break;
+                       }
+                       /* fallthrough */
+               case 128:
+               default:
+                       val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                       val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+                       break;
+               }
+       } else {
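+               /* Plain PCI: for BOUNDARY_SINGLE_CACHELINE pick a boundary
+                * equal to the cache line size; otherwise fall through to
+                * the larger multi-cacheline settings below.
+                */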
+               switch (cacheline_size) {
+               case 16:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_16 |
+                                       DMA_RWCTRL_WRITE_BNDRY_16);
+                               break;
+                       }
+                       /* fallthrough */
+               case 32:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_32 |
+                                       DMA_RWCTRL_WRITE_BNDRY_32);
+                               break;
+                       }
+                       /* fallthrough */
+               case 64:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_64 |
+                                       DMA_RWCTRL_WRITE_BNDRY_64);
+                               break;
+                       }
+                       /* fallthrough */
+               case 128:
+                       if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                               val |= (DMA_RWCTRL_READ_BNDRY_128 |
+                                       DMA_RWCTRL_WRITE_BNDRY_128);
+                               break;
+                       }
+                       /* fallthrough */
+               case 256:
+                       val |= (DMA_RWCTRL_READ_BNDRY_256 |
+                               DMA_RWCTRL_WRITE_BNDRY_256);
+                       break;
+               case 512:
+                       val |= (DMA_RWCTRL_READ_BNDRY_512 |
+                               DMA_RWCTRL_WRITE_BNDRY_512);
+                       break;
+               case 1024:
+               default:
+                       val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+                               DMA_RWCTRL_WRITE_BNDRY_1024);
+                       break;
+               }
+       }
+
+out:
+       return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
        struct tg3_internal_buffer_desc test_desc;
@@ -7760,12 +11312,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
        return ret;
 }
 
-#define TEST_BUFFER_SIZE       0x400
+#define TEST_BUFFER_SIZE       0x2000
 
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
        dma_addr_t buf_dma;
-       u32 *buf;
+       u32 *buf, saved_dma_rwctrl;
        int ret;
 
        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -7777,49 +11329,11 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-       {
-               u8 byte;
-               int cacheline_size;
-               pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-               if (byte == 0)
-                       cacheline_size = 1024;
-               else
-                       cacheline_size = (int) byte * 4;
-
-               switch (cacheline_size) {
-               case 16:
-               case 32:
-               case 64:
-               case 128:
-                       if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-                               break;
-                       } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-                               tp->dma_rwctrl &=
-                                       ~(DMA_RWCTRL_PCI_WRITE_CMD);
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-                               break;
-                       }
-                       /* fallthrough */
-               case 256:
-                       if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_256;
-                       else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                               tp->dma_rwctrl |=
-                                       DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-               };
-       }
-#endif
+       tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-               tp->dma_rwctrl |= 0x001f0000;
+               /* DMA read watermark not used on PCIE */
+               tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
@@ -7831,11 +11345,24 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
 
-                       if (ccval == 0x6 || ccval == 0x7)
+                       /* If the 5704 is behind the EPB bridge, we can
+                        * do the less restrictive ONE_DMA workaround for
+                        * better performance.
+                        */
+                       if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
+                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+                               tp->dma_rwctrl |= 0x8000;
+                       else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-                       /* Set bit 23 to renable PCIX hw bug fix */
+                       /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |= 0x009f0000;
+               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+                       /* 5780 always in PCIX mode */
+                       tp->dma_rwctrl |= 0x00144000;
+               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+                       /* 5714 always in PCIX mode */
+                       tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
@@ -7875,6 +11402,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;
 
+       /* It is best to perform the DMA test with the maximum write burst
+        * size to expose the 5700/5701 write DMA bug.
+        */
+       saved_dma_rwctrl = tp->dma_rwctrl;
+       tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+       tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
        while (1) {
                u32 *p = buf, i;
 
@@ -7913,8 +11447,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                        if (p[i] == i)
                                continue;
 
-                       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-                           DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+                       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+                           DMA_RWCTRL_WRITE_BNDRY_16) {
+                               tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
@@ -7931,6 +11466,28 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                        break;
                }
        }
+       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+           DMA_RWCTRL_WRITE_BNDRY_16) {
+               static struct pci_device_id dma_wait_state_chipsets[] = {
+                       { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
+                                    PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+                       { },
+               };
+
+               /* DMA test passed without adjusting the DMA boundary;
+                * now look for chipsets that are known to expose the
+                * DMA bug without failing the test.
+                */
+               if (pci_dev_present(dma_wait_state_chipsets)) {
+                       tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+                       tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+               }
+               else
+                       /* Safe to use the calculated DMA boundary. */
+                       tp->dma_rwctrl = saved_dma_rwctrl;
+
+               tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+       }
 
 out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -7948,7 +11505,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
        tp->link_config.speed = SPEED_INVALID;
        tp->link_config.duplex = DUPLEX_INVALID;
        tp->link_config.autoneg = AUTONEG_ENABLE;
-       netif_carrier_off(tp->dev);
        tp->link_config.active_speed = SPEED_INVALID;
        tp->link_config.active_duplex = DUPLEX_INVALID;
        tp->link_config.phy_is_low_power = 0;
@@ -7959,19 +11515,41 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
 
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
-       tp->bufmgr_config.mbuf_read_dma_low_water =
-               DEFAULT_MB_RDMA_LOW_WATER;
-       tp->bufmgr_config.mbuf_mac_rx_low_water =
-               DEFAULT_MB_MACRX_LOW_WATER;
-       tp->bufmgr_config.mbuf_high_water =
-               DEFAULT_MB_HIGH_WATER;
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               tp->bufmgr_config.mbuf_read_dma_low_water =
+                       DEFAULT_MB_RDMA_LOW_WATER_5705;
+               tp->bufmgr_config.mbuf_mac_rx_low_water =
+                       DEFAULT_MB_MACRX_LOW_WATER_5705;
+               tp->bufmgr_config.mbuf_high_water =
+                       DEFAULT_MB_HIGH_WATER_5705;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+                       tp->bufmgr_config.mbuf_mac_rx_low_water =
+                               DEFAULT_MB_MACRX_LOW_WATER_5906;
+                       tp->bufmgr_config.mbuf_high_water =
+                               DEFAULT_MB_HIGH_WATER_5906;
+               }
+
+               tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+                       DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
+               tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+                       DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
+               tp->bufmgr_config.mbuf_high_water_jumbo =
+                       DEFAULT_MB_HIGH_WATER_JUMBO_5780;
+       } else {
+               tp->bufmgr_config.mbuf_read_dma_low_water =
+                       DEFAULT_MB_RDMA_LOW_WATER;
+               tp->bufmgr_config.mbuf_mac_rx_low_water =
+                       DEFAULT_MB_MACRX_LOW_WATER;
+               tp->bufmgr_config.mbuf_high_water =
+                       DEFAULT_MB_HIGH_WATER;
 
-       tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
-               DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
-       tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
-               DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
-       tp->bufmgr_config.mbuf_high_water_jumbo =
-               DEFAULT_MB_HIGH_WATER_JUMBO;
+               tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+                       DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
+               tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+                       DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
+               tp->bufmgr_config.mbuf_high_water_jumbo =
+                       DEFAULT_MB_HIGH_WATER_JUMBO;
+       }
 
        tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
        tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
@@ -7988,13 +11566,56 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
        case PHY_ID_BCM5704:    return "5704";
        case PHY_ID_BCM5705:    return "5705";
        case PHY_ID_BCM5750:    return "5750";
-       case PHY_ID_BCM8002:    return "8002";
-       case PHY_ID_SERDES:     return "serdes";
+       case PHY_ID_BCM5752:    return "5752";
+       case PHY_ID_BCM5714:    return "5714";
+       case PHY_ID_BCM5780:    return "5780";
+       case PHY_ID_BCM5755:    return "5755";
+       case PHY_ID_BCM5787:    return "5787";
+       case PHY_ID_BCM5756:    return "5722/5756";
+       case PHY_ID_BCM5906:    return "5906";
+       case PHY_ID_BCM8002:    return "8002/serdes";
+       case 0:                 return "serdes";
        default:                return "unknown";
        };
 }
 
-static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
+static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
+{
+       if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+               strcpy(str, "PCI Express");
+               return str;
+       } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+               u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
+
+               strcpy(str, "PCIX:");
+
+               if ((clock_ctrl == 7) ||
+                   ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
+                    GRC_MISC_CFG_BOARD_ID_5704CIOBE))
+                       strcat(str, "133MHz");
+               else if (clock_ctrl == 0)
+                       strcat(str, "33MHz");
+               else if (clock_ctrl == 2)
+                       strcat(str, "50MHz");
+               else if (clock_ctrl == 4)
+                       strcat(str, "66MHz");
+               else if (clock_ctrl == 6)
+                       strcat(str, "100MHz");
+       } else {
+               strcpy(str, "PCI:");
+               if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
+                       strcat(str, "66MHz");
+               else
+                       strcat(str, "33MHz");
+       }
+       if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
+               strcat(str, ":32-bit");
+       else
+               strcat(str, ":64-bit");
+       return str;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
 {
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;
@@ -8005,8 +11626,13 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
                        break;
                pci_dev_put(peer);
        }
-       if (!peer || peer == tp->pdev)
-               BUG();
+       /* The 5704 can be configured in single-port mode; in that case
+        * set peer to tp->pdev.
+        */
+       if (!peer) {
+               peer = tp->pdev;
+               return peer;
+       }
 
        /*
         * We don't need to keep the refcount elevated; there's no way
@@ -8017,6 +11643,37 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
        return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+       struct ethtool_coalesce *ec = &tp->coal;
+
+       memset(ec, 0, sizeof(*ec));
+       ec->cmd = ETHTOOL_GCOALESCE;
+       ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+       ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+       ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+       ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+       ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+       ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+       ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+       ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+       ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+       if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+                                HOSTCC_MODE_CLRTICK_TXBD)) {
+               ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+               ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+               ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+               ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+       }
+
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               ec->rx_coalesce_usecs_irq = 0;
+               ec->tx_coalesce_usecs_irq = 0;
+               ec->stats_block_coalesce_usecs = 0;
+       }
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
 {
@@ -8024,7 +11681,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        unsigned long tg3reg_base, tg3reg_len;
        struct net_device *dev;
        struct tg3 *tp;
-       int i, err, pci_using_dac, pm_cap;
+       int i, err, pm_cap;
+       char str[40];
+       u64 dma_mask, persist_dma_mask;
 
        if (tg3_version_printed++ == 0)
                printk(KERN_INFO "%s", version);
@@ -8061,26 +11720,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_free_res;
        }
 
-       /* Configure DMA attributes. */
-       err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
-       if (!err) {
-               pci_using_dac = 1;
-               err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
-               if (err < 0) {
-                       printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
-                              "for consistent allocations\n");
-                       goto err_out_free_res;
-               }
-       } else {
-               err = pci_set_dma_mask(pdev, 0xffffffffULL);
-               if (err) {
-                       printk(KERN_ERR PFX "No usable DMA configuration, "
-                              "aborting.\n");
-                       goto err_out_free_res;
-               }
-               pci_using_dac = 0;
-       }
-
        tg3reg_base = pci_resource_start(pdev, 0);
        tg3reg_len = pci_resource_len(pdev, 0);
 
@@ -8094,8 +11733,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-       if (pci_using_dac)
-               dev->features |= NETIF_F_HIGHDMA;
 #if TG3_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = tg3_vlan_rx_register;
@@ -8137,11 +11774,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
        spin_lock_init(&tp->lock);
-       spin_lock_init(&tp->tx_lock);
        spin_lock_init(&tp->indirect_lock);
-       INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+       INIT_WORK(&tp->reset_task, tg3_reset_task);
 
-       tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
+       tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        if (tp->regs == 0UL) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
@@ -8151,8 +11787,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
        tg3_init_link_config(tp);
 
-       tg3_init_bufmgr_config(tp);
-
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
        tp->tx_pending = TG3_DEF_TX_RING_PENDING;
@@ -8181,32 +11815,71 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_iounmap;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
-               tp->bufmgr_config.mbuf_read_dma_low_water =
-                       DEFAULT_MB_RDMA_LOW_WATER_5705;
-               tp->bufmgr_config.mbuf_mac_rx_low_water =
-                       DEFAULT_MB_MACRX_LOW_WATER_5705;
-               tp->bufmgr_config.mbuf_high_water =
-                       DEFAULT_MB_HIGH_WATER_5705;
+       /* The EPB bridge inside the 5714, 5715, and 5780, and any
+        * device behind the EPB, cannot handle DMA addresses wider
+        * than 40 bits.
+        * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
+        * On 64-bit systems without an IOMMU, use a 64-bit dma_mask
+        * and do the DMA address check in tg3_start_xmit().
+        */
+       if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+               persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+       else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
+               persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+               dma_mask = DMA_64BIT_MASK;
+#endif
+       } else
+               persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+       /* Configure DMA attributes. */
+       if (dma_mask > DMA_32BIT_MASK) {
+               err = pci_set_dma_mask(pdev, dma_mask);
+               if (!err) {
+                       dev->features |= NETIF_F_HIGHDMA;
+                       err = pci_set_consistent_dma_mask(pdev,
+                                                         persist_dma_mask);
+                       if (err < 0) {
+                               printk(KERN_ERR PFX "Unable to obtain 64 bit "
+                                      "DMA for consistent allocations\n");
+                               goto err_out_iounmap;
+                       }
+               }
+       }
+       if (err || dma_mask == DMA_32BIT_MASK) {
+               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               if (err) {
+                       printk(KERN_ERR PFX "No usable DMA configuration, "
+                              "aborting.\n");
+                       goto err_out_iounmap;
+               }
        }
 
+       tg3_init_bufmgr_config(tp);
+
 #if TG3_TSO_SUPPORT != 0
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+               tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+       }
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
-           ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 &&
-            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)) {
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
        } else {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }
 
-       /* TSO is off by default, user can enable using ethtool.  */
-#if 0
-       if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
+       /* TSO is on by default on chips that support hardware TSO.
+        * Firmware TSO on older chips gives lower performance, so it
+        * is off by default, but can be enabled using ethtool.
+        */
+       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                dev->features |= NETIF_F_TSO;
-#endif
+               if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
+                       dev->features |= NETIF_F_TSO6;
+       }
 
 #endif
 
@@ -8217,8 +11890,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                tp->rx_pending = 63;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
-               tp->pdev_peer = tg3_find_5704_peer(tp);
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
+               tp->pdev_peer = tg3_find_peer(tp);
 
        err = tg3_get_device_address(tp);
        if (err) {
@@ -8234,9 +11908,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-               pci_save_state(tp->pdev, tp->pci_cfg_state);
+               pci_save_state(tp->pdev);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-               tg3_halt(tp);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }
 
        err = tg3_test_dma(tp);
@@ -8249,13 +11923,28 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
         * checksumming.
         */
        if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
-               dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+                       dev->features |= NETIF_F_HW_CSUM;
+               else
+                       dev->features |= NETIF_F_IP_CSUM;
+               dev->features |= NETIF_F_SG;
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        } else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
 
-       if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
-               dev->features &= ~NETIF_F_HIGHDMA;
+       /* flow control autonegotiation is default behavior */
+       tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+
+       tg3_init_coal(tp);
+
+       /* Now that we have fully setup the chip, save away a snapshot
+        * of the PCI config space.  We need to restore this after
+        * GRC_MISC_CFG core clock resets and some resume events.
+        */
+       pci_save_state(tp->pdev);
+
+       pci_set_drvdata(pdev, dev);
 
        err = register_netdev(dev);
        if (err) {
@@ -8264,35 +11953,24 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_iounmap;
        }
 
-       pci_set_drvdata(pdev, dev);
-
-       /* Now that we have fully setup the chip, save away a snapshot
-        * of the PCI config space.  We need to restore this after
-        * GRC_MISC_CFG core clock resets and some resume events.
-        */
-       pci_save_state(tp->pdev, tp->pci_cfg_state);
-
-       printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
+       printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
-              ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
-              ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
-               ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
-               ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
-              ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
-              (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
+              tg3_bus_string(tp, str),
+              ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+               ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+                "10/100/1000Base-T")));
 
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');
 
-       printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
+       printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
               "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
               "TSOcap[%d] \n",
               dev->name,
-              (tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
               (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
@@ -8300,11 +11978,18 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
               (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+       printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+              dev->name, tp->dma_rwctrl,
+              (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
+               (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
 
        return 0;
 
 err_out_iounmap:
-       iounmap((void *) tp->regs);
+       if (tp->regs) {
+               iounmap(tp->regs);
+               tp->regs = NULL;
+       }
 
 err_out_free_dev:
        free_netdev(dev);
@@ -8325,8 +12010,12 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
        if (dev) {
                struct tg3 *tp = netdev_priv(dev);
 
+               flush_scheduled_work();
                unregister_netdev(dev);
-               iounmap((void *)tp->regs);
+               if (tp->regs) {
+                       iounmap(tp->regs);
+                       tp->regs = NULL;
+               }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
@@ -8334,7 +12023,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
        }
 }
 
-static int tg3_suspend(struct pci_dev *pdev, u32 state)
+static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
@@ -8343,39 +12032,38 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state)
        if (!netif_running(dev))
                return 0;
 
+       flush_scheduled_work();
        tg3_netif_stop(tp);
 
        del_timer_sync(&tp->timer);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        netif_device_detach(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
-       tg3_halt(tp);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_lock(tp, 0);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+       tg3_full_unlock(tp);
 
-       err = tg3_set_power_state(tp, state);
+       err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
-               spin_lock_irq(&tp->lock);
-               spin_lock(&tp->tx_lock);
+               tg3_full_lock(tp, 0);
 
-               tg3_init_hw(tp);
+               tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+               if (tg3_restart_hw(tp, 1))
+                       goto out;
 
                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);
 
-               spin_unlock(&tp->tx_lock);
-               spin_unlock_irq(&tp->lock);
-
                netif_device_attach(dev);
                tg3_netif_start(tp);
+
+out:
+               tg3_full_unlock(tp);
        }
 
        return err;
@@ -8390,30 +12078,30 @@ static int tg3_resume(struct pci_dev *pdev)
        if (!netif_running(dev))
                return 0;
 
-       pci_restore_state(tp->pdev, tp->pci_cfg_state);
+       pci_restore_state(tp->pdev);
 
-       err = tg3_set_power_state(tp, 0);
+       err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;
 
        netif_device_attach(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
-       tg3_init_hw(tp);
+       tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+       err = tg3_restart_hw(tp, 1);
+       if (err)
+               goto out;
 
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
 
-       tg3_enable_ints(tp);
-
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
-
        tg3_netif_start(tp);
 
-       return 0;
+out:
+       tg3_full_unlock(tp);
+
+       return err;
 }
 
 static struct pci_driver tg3_driver = {
@@ -8427,7 +12115,7 @@ static struct pci_driver tg3_driver = {
 
 static int __init tg3_init(void)
 {
-       return pci_module_init(&tg3_driver);
+       return pci_register_driver(&tg3_driver);
 }
 
 static void __exit tg3_cleanup(void)