--- /build/BUILD/kernel-2.6.32-131.0.15.el6/linux-2.6.32-27.mlab.mlab.i686/drivers/net/tg3.c 2012-06-19 17:19:33.115164893 -0400 +++ linux-2.6.32-220.el6/drivers/net/tg3.c 2011-11-08 16:06:47.000000000 -0500 @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2010 Broadcom Corporation. + * Copyright (C) 2005-2011 Broadcom Corporation. * * Firmware is: * Derived from proprietary unpublished source code, @@ -48,9 +48,9 @@ #include #include -#include +#include #include -#include +#include #ifdef CONFIG_SPARC #include @@ -68,12 +68,36 @@ #include "tg3.h" +/* Functions & macros to verify TG3_FLAGS types */ + +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) +{ + return test_bit(flag, bits); +} + +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) +{ + set_bit(flag, bits); +} + +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) +{ + clear_bit(flag, bits); +} + +#define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 113 +#define TG3_MIN_NUM 119 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "August 2, 2010" +#define DRV_MODULE_RELDATE "May 18, 2011" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -88,29 +112,30 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ + #define TG3_TX_TIMEOUT (5 * HZ) /* hardware minimum and maximum for a single frame's data payload */ #define TG3_MIN_MTU 60 #define TG3_MAX_MTU(tp) \ - ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500) + (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) /* These numbers seem to be hard coded in the NIC firmware somehow. * You can't change the ring sizes, but you can change where you place * them in the NIC onboard memory. */ #define TG3_RX_STD_RING_SIZE(tp) \ - ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \ - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \ - RX_STD_MAX_SIZE_5717 : 512) + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) #define TG3_DEF_RX_RING_PENDING 200 #define TG3_RX_JMB_RING_SIZE(tp) \ - ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \ - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \ - 1024 : 256) + (tg3_flag(tp, LRG_PROD_RING_CAP) ? 
\ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) #define TG3_DEF_RX_JUMBO_RING_PENDING 100 #define TG3_RSS_INDIR_TBL_SIZE 128 @@ -176,11 +201,6 @@ #define TG3_RAW_IP_ALIGN 2 -/* number of ETHTOOL_GSTATS u64's */ -#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) - -#define TG3_NUM_TEST 6 - #define TG3_FW_UPDATE_TIMEOUT_SEC 5 #define FIRMWARE_TG3 "tigon/tg3.bin" @@ -268,7 +288,6 @@ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, @@ -276,6 +295,7 @@ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -290,7 +310,7 @@ static const struct { const char string[ETH_GSTRING_LEN]; -} ethtool_stats_keys[TG3_NUM_STATS] = { +} ethtool_stats_keys[] = { { "rx_octets" }, { "rx_fragments" }, { "rx_ucast_packets" }, @@ -366,12 +386,17 @@ { "ring_status_update" }, { "nic_irqs" }, { "nic_avoided_irqs" }, - { "nic_tx_threshold_hit" } + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, }; +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + + static const struct { const char string[ETH_GSTRING_LEN]; -} ethtool_test_keys[TG3_NUM_TEST] = { +} ethtool_test_keys[] = { { "nvram test (online) " }, { "link test (online) " }, { "register test (offline)" }, @@ -380,6 +405,9 @@ { "interrupt test (offline)" }, }; +#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + static void tg3_write32(struct tg3 *tp, u32 off, u32 val) { writel(val, tp->regs + off); @@ -477,8 +505,7 @@ */ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) { - if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) || - (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) /* Non-posted methods */ tp->write32(tp, off, val); else { @@ -498,8 +525,7 @@ static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) { tp->write32_mbox(tp, off, val); - if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) && - !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) + if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) tp->read32_mbox(tp, off); } @@ -507,9 +533,9 @@ { void __iomem *mbox = tp->regs + off; writel(val, mbox); - if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) + if (tg3_flag(tp, TXD_MBOX_HWBUG)) writel(val, mbox); - if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) + if (tg3_flag(tp, MBOX_WRITE_REORDER)) readl(mbox); } @@ -538,12 +564,12 @@ { unsigned long flags; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) return; spin_lock_irqsave(&tp->indirect_lock, flags); - if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { + if (tg3_flag(tp, SRAM_USE_CONFIG)) { 
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); @@ -563,14 +589,14 @@ { unsigned long flags; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { *val = 0; return; } spin_lock_irqsave(&tp->indirect_lock, flags); - if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { + if (tg3_flag(tp, SRAM_USE_CONFIG)) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); @@ -607,7 +633,7 @@ int ret = 0; u32 status, req, gnt; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + if (!tg3_flag(tp, ENABLE_APE)) return 0; switch (locknum) { @@ -653,7 +679,7 @@ { u32 gnt; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + if (!tg3_flag(tp, ENABLE_APE)) return; switch (locknum) { @@ -697,14 +723,14 @@ struct tg3_napi *tnapi = &tp->napi[i]; tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + if (tg3_flag(tp, 1SHOT_MSI)) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); tp->coal_now |= tnapi->coal_now; } /* Force an initial interrupt */ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && + if (!tg3_flag(tp, TAGGED_STATUS) && (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); else @@ -720,9 +746,7 @@ unsigned int work_exists = 0; /* check for phy events */ - if (!(tp->tg3_flags & - (TG3_FLAG_USE_LINKCHG_REG | - TG3_FLAG_POLL_SERDES))) { + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { if (sblk->status & SD_STATUS_LINK_CHG) work_exists = 1; } @@ -750,55 +774,17 @@ * The last_tag we write above tells the chip which piece of * work we've completed. 
*/ - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && - tg3_has_work(tnapi)) + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) tw32(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | tnapi->coal_now); } -static void tg3_napi_disable(struct tg3 *tp) -{ - int i; - - for (i = tp->irq_cnt - 1; i >= 0; i--) - napi_disable(&tp->napi[i].napi); -} - -static void tg3_napi_enable(struct tg3 *tp) -{ - int i; - - for (i = 0; i < tp->irq_cnt; i++) - napi_enable(&tp->napi[i].napi); -} - -static inline void tg3_netif_stop(struct tg3 *tp) -{ - tp->dev->trans_start = jiffies; /* prevent tx timeout */ - tg3_napi_disable(tp); - netif_tx_disable(tp->dev); -} - -static inline void tg3_netif_start(struct tg3 *tp) -{ - /* NOTE: unconditional netif_tx_wake_all_queues is only - * appropriate so long as all callers are assured to - * have free tx slots (such as after tg3_init_hw) - */ - netif_tx_wake_all_queues(tp->dev); - - tg3_napi_enable(tp); - tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; - tg3_enable_ints(tp); -} - static void tg3_switch_clocks(struct tg3 *tp) { u32 clock_ctrl; u32 orig_clock_ctrl; - if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) return; clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); @@ -809,7 +795,7 @@ 0x1f); tp->pci_clock_ctrl = clock_ctrl; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl | CLOCK_CTRL_625_CORE, 40); @@ -926,6 +912,104 @@ return ret; } +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; +} + +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) +{ + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); +} + +#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ + 
MII_TG3_AUXCTL_ACTL_TX_6DB) + +#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_TX_6DB); + static int tg3_bmcr_reset(struct tg3 *tp) { u32 phy_control; @@ -1028,7 +1112,7 @@ return; } - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) val |= MAC_PHYCFG2_EMODE_MASK_MASK | MAC_PHYCFG2_FMODE_MASK_MASK | MAC_PHYCFG2_GMODE_MASK_MASK | @@ -1041,10 +1125,10 @@ val = tr32(MAC_PHYCFG1); val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; } val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | @@ -1059,13 +1143,13 @@ MAC_RGMII_MODE_TX_ENABLE | MAC_RGMII_MODE_TX_LOWPWR | MAC_RGMII_MODE_TX_RESET); - if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) val |= MAC_RGMII_MODE_RX_INT_B | MAC_RGMII_MODE_RX_QUALITY | MAC_RGMII_MODE_RX_ACTIVITY | MAC_RGMII_MODE_RX_ENG_DET; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) val |= MAC_RGMII_MODE_TX_ENABLE | MAC_RGMII_MODE_TX_LOWPWR | MAC_RGMII_MODE_TX_RESET; @@ -1079,7 +1163,7 @@ tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); - if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && + if (tg3_flag(tp, MDIOBUS_INITED) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_mdio_config_5785(tp); } @@ -1090,8 +1174,7 @@ u32 reg; struct phy_device *phydev; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + if (tg3_flag(tp, 5717_PLUS)) { u32 is_serdes; tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1; @@ -1108,8 +1191,7 @@ tg3_mdio_start(tp); - if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || - (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)) + if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) return 0; tp->mdio_bus = mdiobus_alloc(); @@ -1165,11 +1247,11 @@ PHY_BRCM_RX_REFCLK_UNUSED | PHY_BRCM_DIS_TXCRXC_NOENRGY | PHY_BRCM_AUTO_PWRDWN_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE) + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; /* fallthru */ case PHY_ID_RTL8211C: @@ -1183,7 +1265,7 @@ break; } - tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED; + tg3_flag_set(tp, MDIOBUS_INITED); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_mdio_config_5785(tp); @@ -1193,59 +1275,13 @@ static void tg3_mdio_fini(struct tg3 *tp) { - if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { - tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; + if (tg3_flag(tp, MDIOBUS_INITED)) { + tg3_flag_clear(tp, MDIOBUS_INITED); mdiobus_unregister(tp->mdio_bus); mdiobus_free(tp->mdio_bus); } } -static int tg3_phy_cl45_write(struct tg3 
*tp, u32 devad, u32 addr, u32 val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, - MII_TG3_MMD_CTRL_DATA_NOINC | devad); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); - -done: - return err; -} - -static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, - MII_TG3_MMD_CTRL_DATA_NOINC | devad); - if (err) - goto done; - - err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); - -done: - return err; -} - /* tp->lock is held. */ static inline void tg3_generate_fw_event(struct tg3 *tp) { @@ -1293,8 +1329,7 @@ u32 reg; u32 val; - if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) return; tg3_wait_for_event_ack(tp); @@ -1354,6 +1389,11 @@ "on" : "off", (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ? "enabled" : "disabled"); + tg3_ump_link_report(tp); } } @@ -1419,13 +1459,12 @@ u32 old_rx_mode = tp->rx_mode; u32 old_tx_mode = tp->tx_mode; - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) + if (tg3_flag(tp, USE_PHYLIB)) autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; else autoneg = tp->link_config.autoneg; - if (autoneg == AUTONEG_ENABLE && - (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); else @@ -1622,28 +1661,6 @@ } } -static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - if (!err) - err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); - - return err; -} - -static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - if (!err) - err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); - - return err; -} - static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) { u32 phytest; @@ -1668,9 +1685,8 @@ { u32 reg; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) && + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) return; @@ -1704,7 +1720,7 @@ { u32 phy; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || + if (!tg3_flag(tp, 5705_PLUS) || (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) return; @@ -1726,31 +1742,33 @@ tg3_writephy(tp, MII_TG3_FET_TEST, ephy); } } else { - phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | - MII_TG3_AUXCTL_SHDWSEL_MISC; - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { if (enable) phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; else phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; - phy |= MII_TG3_AUXCTL_MISC_WREN; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); } } } static void 
tg3_phy_set_wirespeed(struct tg3 *tp) { + int ret; u32 val; if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) return; - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) - tg3_writephy(tp, MII_TG3_AUX_CTRL, - (val | (1 << 15) | (1 << 4))); + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); } static void tg3_phy_apply_otp(struct tg3 *tp) @@ -1762,11 +1780,8 @@ otp = tp->phy_otp; - /* Enable SM_DSP clock and tx 6dB coding. */ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + return; phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; @@ -1790,10 +1805,7 @@ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); - /* Turn off SM_DSP clock. */ - phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); } static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) @@ -1833,6 +1845,23 @@ } } +static void tg3_phy_eee_enable(struct tg3 *tp) +{ + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); +} + static int tg3_wait_macro_done(struct tg3 *tp) { int limit = 100; @@ -1971,8 +2000,9 @@ (MII_TG3_CTRL_AS_MASTER | MII_TG3_CTRL_ENABLE_AS_MASTER)); - /* Enable SM_DSP_CLOCK and 6dB. */ - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (err) + return err; /* Block the PHY control access. 
*/ tg3_phydsp_write(tp, 0x8005, 0x0800); @@ -1991,13 +2021,7 @@ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - /* Set Extended packet length bit for jumbo frames */ - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); - } else { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); @@ -2015,19 +2039,16 @@ */ static int tg3_phy_reset(struct tg3 *tp) { - u32 cpmuctrl; - u32 phy_status; + u32 val, cpmuctrl; int err; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val; - val = tr32(GRC_MISC_CFG); tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); udelay(40); } - err = tg3_readphy(tp, MII_BMSR, &phy_status); - err |= tg3_readphy(tp, MII_BMSR, &phy_status); + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); if (err != 0) return -EBUSY; @@ -2059,18 +2080,14 @@ return err; if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { - u32 phy; - - phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; - tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy); + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); tw32(TG3_CPMU_CTRL, cpmuctrl); } if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { - u32 val; - val = tr32(TG3_CPMU_LSPD_1000MB_CLK); if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == CPMU_LSPD_1000MB_MACCLK_12_5) { @@ -2080,8 +2097,7 @@ } } - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) && + if (tg3_flag(tp, 5717_PLUS) && (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) return 0; @@ -2093,56 +2109,60 @@ tg3_phy_toggle_apd(tp, false); out: - if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { tg3_phydsp_write(tp, 0x201f, 0x2aaa); tg3_phydsp_write(tp, 0x000a, 0x0323); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); } + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); } + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_phydsp_write(tp, 0x000a, 0x310b); - tg3_phydsp_write(tp, 0x201f, 0x9506); - tg3_phydsp_write(tp, 0x401f, 0x14e2); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); - tg3_writephy(tp, MII_TG3_TEST1, - MII_TG3_TEST1_TRIM_EN | 0x4); - } else - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + + 
TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } } + /* Set Extended packet length bit (bit 14) on all chips that */ /* support jumbo frames */ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { /* Cannot do read-modify-write on 5401 */ - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); - } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - u32 phy_reg; - + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + } else if (tg3_flag(tp, JUMBO_CAPABLE)) { /* Set bit 14 with read-modify-write to preserve other bits */ - if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && - !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) - tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); } /* Set phy register 0x10 bit 0 to high fifo elasticity to support * jumbo frames transmission. */ - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - u32 phy_reg; - - if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) tg3_writephy(tp, MII_TG3_EXT_CTRL, - phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { @@ -2155,120 +2175,158 @@ return 0; } -static void tg3_frob_aux_power(struct tg3 *tp) +static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) { - struct tg3 *tp_peer = tp; + if (!tg3_flag(tp, IS_NIC)) + return 0; - /* The GPIOs do something completely different on 57765. */ - if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + return 0; +} + +static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) +{ + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) return; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - struct net_device *dev_peer; + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; - dev_peer = pci_get_drvdata(tp->pdev_peer); - /* remove_one() may have been run on the peer. 
*/ - if (!dev_peer) - tp_peer = tp; - else - tp_peer = netdev_priv(dev_peer); - } + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); - if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || - (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || - (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE0 | + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); +} + +static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1), - 100); - } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { - /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ - u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1 | - tp->grc_local_ctrl; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); - - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); - grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); - } else { - u32 no_gpio2; - u32 grc_local_ctrl = 0; + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); - if (tp_peer != tp && - (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) - return; + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; - /* Workaround to prevent overdrawing Amps. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5714) { - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); - } + /* Workaround to prevent overdrawing Amps. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } - /* On 5753 and variants, GPIO2 cannot be used. */ - no_gpio2 = tp->nic_sram_data_cfg & - NIC_SRAM_DATA_CFG_NO_GPIO2; + /* On 5753 and variants, GPIO2 cannot be used. 
*/ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT1 | - GRC_LCLCTRL_GPIO_OUTPUT2; - if (no_gpio2) { - grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT2); - } - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); - if (!no_gpio2) { - grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, 100); - } + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); } - } else { - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - if (tp_peer != tp && - (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) - return; + } +} - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OUTPUT1), 100); +static void tg3_frob_aux_power(struct tg3 *tp) +{ + bool need_vaux = false; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - GRC_LCLCTRL_GPIO_OE1, 100); + /* The GPIOs do something completely different on 57765. */ + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + return; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OUTPUT1), 100); + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) && + tp->pdev_peer != tp->pdev) { + + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. */ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if (tg3_flag(tp_peer, WOL_ENABLE) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; } } + + if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); } static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) @@ -2340,11 +2398,10 @@ tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF); - tg3_writephy(tp, MII_TG3_AUX_CTRL, - MII_TG3_AUXCTL_SHDWSEL_PWRCTL | - MII_TG3_AUXCTL_PCTL_100TX_LPWR | - MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | - MII_TG3_AUXCTL_PCTL_VREG_11V); + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); } /* The PHY should not be powered down on some chips because @@ -2370,7 +2427,7 @@ /* tp->lock is held. 
*/ static int tg3_nvram_lock(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_NVRAM) { + if (tg3_flag(tp, NVRAM)) { int i; if (tp->nvram_lock_cnt == 0) { @@ -2393,7 +2450,7 @@ /* tp->lock is held. */ static void tg3_nvram_unlock(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_NVRAM) { + if (tg3_flag(tp, NVRAM)) { if (tp->nvram_lock_cnt > 0) tp->nvram_lock_cnt--; if (tp->nvram_lock_cnt == 0) @@ -2404,8 +2461,7 @@ /* tp->lock is held. */ static void tg3_enable_nvram_access(struct tg3 *tp) { - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); @@ -2415,8 +2471,7 @@ /* tp->lock is held. */ static void tg3_disable_nvram_access(struct tg3 *tp) { - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); @@ -2486,10 +2541,10 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) { - if ((tp->tg3_flags & TG3_FLAG_NVRAM) && - (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr / tp->nvram_pagesize) << @@ -2501,10 +2556,10 @@ static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) { - if ((tp->tg3_flags & TG3_FLAG_NVRAM) && - (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * @@ -2524,7 +2579,7 @@ { int ret; - if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) + if (!tg3_flag(tp, NVRAM)) return tg3_nvram_read_using_eeprom(tp, offset, val); offset = tg3_nvram_phys_addr(tp, offset); @@ -2599,42 +2654,37 @@ tw32(MAC_TX_BACKOFF_SEED, addr_high); } -static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) +static void tg3_enable_register_access(struct tg3 *tp) { - u32 misc_host_ctrl; - bool device_should_wake, do_low_power; - - /* Make sure register accesses (indirect or otherwise) - * will function correctly. + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. 
*/ pci_write_config_dword(tp->pdev, - TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); +} - switch (state) { - case PCI_D0: - pci_enable_wake(tp->pdev, state, false); - pci_set_power_state(tp->pdev, PCI_D0); +static int tg3_power_up(struct tg3 *tp) +{ + tg3_enable_register_access(tp); - /* Switch out of Vaux if it is a NIC */ - if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); + pci_set_power_state(tp->pdev, PCI_D0); - return 0; + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); - case PCI_D1: - case PCI_D2: - case PCI_D3hot: - break; + return 0; +} - default: - netdev_err(tp->dev, "Invalid power state (D%d) requested\n", - state); - return -EINVAL; - } +static int tg3_power_down_prepare(struct tg3 *tp) +{ + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); /* Restore the CLKREQ setting. */ - if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { + if (tg3_flag(tp, CLKREQ_BUG)) { u16 lnkctl; pci_read_config_word(tp->pdev, @@ -2650,11 +2700,10 @@ tw32(TG3PCI_MISC_HOST_CTRL, misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); - device_should_wake = pci_pme_capable(tp->pdev, state) && - device_may_wakeup(&tp->pdev->dev) && - (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { do_low_power = false; if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { @@ -2675,9 +2724,8 @@ ADVERTISED_Autoneg | ADVERTISED_10baseT_Half; - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - device_should_wake) { - if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) advertising |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | @@ -2722,7 +2770,7 @@ val = tr32(GRC_VCPU_EXT_CTRL); tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); - } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { + } else if (!tg3_flag(tp, ENABLE_ASF)) { int i; u32 val; @@ -2733,7 +2781,7 @@ msleep(1); } } - if (tp->tg3_flags & TG3_FLAG_WOL_CAP) + if (tg3_flag(tp, WOL_CAP)) tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | WOL_DRV_STATE_SHUTDOWN | WOL_DRV_WOL | @@ -2743,8 +2791,13 @@ u32 mac_mode; if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { - if (do_low_power) { - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); udelay(40); } @@ -2756,8 +2809,7 @@ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - u32 speed = (tp->tg3_flags & - TG3_FLAG_WOL_SPEED_100MB) ? + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 
SPEED_100 : SPEED_10; if (tg3_5700_link_polarity(tp, speed)) mac_mode |= MAC_MODE_LINK_POLARITY; @@ -2768,22 +2820,18 @@ mac_mode = MAC_MODE_PORT_MODE_TBI; } - if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) + if (!tg3_flag(tp, 5750_PLUS)) tw32(MAC_LED_CTRL, tp->led_ctrl); mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; - if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && - ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - mac_mode |= tp->mac_mode & - (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); - if (mac_mode & MAC_MODE_APE_TX_EN) - mac_mode |= MAC_MODE_TDE_ENABLE; - } + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; tw32_f(MAC_MODE, mac_mode); udelay(100); @@ -2792,7 +2840,7 @@ udelay(10); } - if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && + if (!tg3_flag(tp, WOL_SPEED_100MB) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { u32 base_val; @@ -2803,12 +2851,11 @@ tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | CLOCK_CTRL_PWRDOWN_PLL133, 40); - } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { /* do nothing */ - } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { u32 newbits1, newbits2; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -2817,7 +2864,7 @@ CLOCK_CTRL_TXCLK_DISABLE | CLOCK_CTRL_ALTCLK); newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + } else if (tg3_flag(tp, 5705_PLUS)) { newbits1 = CLOCK_CTRL_625_CORE; newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; } else { @@ -2831,7 +2878,7 @@ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 40); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { u32 newbits3; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -2848,8 +2895,7 @@ } } - if (!(device_should_wake) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) tg3_power_down_phy(tp, do_low_power); tg3_frob_aux_power(tp); @@ -2861,7 +2907,7 @@ val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); tw32(0x7d00, val); - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { + if (!tg3_flag(tp, ENABLE_ASF)) { int err; err = tg3_nvram_lock(tp); @@ -2873,13 +2919,15 @@ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); - if (device_should_wake) - pci_enable_wake(tp->pdev, state, true); + return 0; +} - /* Finally, set the new power state. 
*/ - pci_set_power_state(tp->pdev, state); +static void tg3_power_down(struct tg3 *tp) +{ + tg3_power_down_prepare(tp); - return 0; + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); } static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) @@ -2929,130 +2977,130 @@ } } -static void tg3_phy_copper_begin(struct tg3 *tp) +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) { - u32 new_adv; - int i; + int err = 0; + u32 val, new_adv; - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { - /* Entering low power mode. Disable gigabit and - * 100baseT advertisements. - */ - tg3_writephy(tp, MII_TG3_CTRL, 0); + new_adv = ADVERTISE_CSMA; + if (advertise & ADVERTISED_10baseT_Half) + new_adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + new_adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + new_adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + new_adv |= ADVERTISE_100FULL; - new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); - if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) - new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); + new_adv |= tg3_advert_flowctrl_1000T(flowctrl); - tg3_writephy(tp, MII_ADVERTISE, new_adv); - } else if (tp->link_config.speed == SPEED_INVALID) { - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - tp->link_config.advertising &= - ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; - new_adv = ADVERTISE_CSMA; - if (tp->link_config.advertising & ADVERTISED_10baseT_Half) - new_adv |= ADVERTISE_10HALF; - if (tp->link_config.advertising & ADVERTISED_10baseT_Full) - new_adv |= ADVERTISE_10FULL; - if (tp->link_config.advertising & ADVERTISED_100baseT_Half) - new_adv |= ADVERTISE_100HALF; - if (tp->link_config.advertising & ADVERTISED_100baseT_Full) - new_adv |= ADVERTISE_100FULL; + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + goto done; - new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + new_adv = 0; + if (advertise & ADVERTISED_1000baseT_Half) + new_adv |= MII_TG3_CTRL_ADV_1000_HALF; + if (advertise & ADVERTISED_1000baseT_Full) + new_adv |= MII_TG3_CTRL_ADV_1000_FULL; - tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= (MII_TG3_CTRL_AS_MASTER | + MII_TG3_CTRL_ENABLE_AS_MASTER); - if (tp->link_config.advertising & - (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { - new_adv = 0; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) - new_adv |= MII_TG3_CTRL_ADV_1000_HALF; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) - new_adv |= MII_TG3_CTRL_ADV_1000_FULL; - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) && - (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) - new_adv |= (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER); - tg3_writephy(tp, MII_TG3_CTRL, new_adv); - } else { - tg3_writephy(tp, MII_TG3_CTRL, 0); - } - } else { - new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); - new_adv |= ADVERTISE_CSMA; + err = tg3_writephy(tp, MII_TG3_CTRL, new_adv); + if (err) + goto done; - /* Asking for a specific link mode. 
*/ - if (tp->link_config.speed == SPEED_1000) { - tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = MII_TG3_CTRL_ADV_1000_FULL; - else - new_adv = MII_TG3_CTRL_ADV_1000_HALF; - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - new_adv |= (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER); - } else { - if (tp->link_config.speed == SPEED_100) { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv |= ADVERTISE_100FULL; - else - new_adv |= ADVERTISE_100HALF; - } else { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv |= ADVERTISE_10FULL; - else - new_adv |= ADVERTISE_10HALF; - } - tg3_writephy(tp, MII_ADVERTISE, new_adv); + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); - new_adv = 0; + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; + + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + /* Fall through */ + case ASIC_REV_5719: + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); } - tg3_writephy(tp, MII_TG3_CTRL, new_adv); + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; } - if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { - u32 val; +done: + return err; +} - tw32(TG3_CPMU_EEE_MODE, - tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); +static void tg3_phy_copper_begin(struct tg3 *tp) +{ + u32 new_adv; + int i; - /* Enable SM_DSP clock and tx 6dB coding. */ - val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && - !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) - tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, - val | MII_TG3_DSP_CH34TP2_HIBW01); + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); + } else if (tp->link_config.speed == SPEED_INVALID) { + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->link_config.advertising &= + ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); - val = 0; - if (tp->link_config.autoneg == AUTONEG_ENABLE) { - /* Advertise 100-BaseTX EEE ability */ - if (tp->link_config.advertising & - ADVERTISED_100baseT_Full) - val |= MDIO_AN_EEE_ADV_100TX; - /* Advertise 1000-BaseT EEE ability */ - if (tp->link_config.advertising & - ADVERTISED_1000baseT_Full) - val |= MDIO_AN_EEE_ADV_1000T; + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + } else { + /* Asking for a specific link mode. 
*/ + if (tp->link_config.speed == SPEED_1000) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_1000baseT_Full; + else + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; } - tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); - /* Turn off SM_DSP clock. */ - val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); } if (tp->link_config.autoneg == AUTONEG_DISABLE && @@ -3110,7 +3158,7 @@ /* Turn off tap power management. */ /* Set Extended packet length bit */ - err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); err |= tg3_phydsp_write(tp, 0x0012, 0x1804); err |= tg3_phydsp_write(tp, 0x0013, 0x1204); @@ -3173,7 +3221,7 @@ if (curadv != reqadv) return 0; - if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) + if (tg3_flag(tp, PAUSE_AUTONEG)) tg3_readphy(tp, MII_LPA, rmtadv); } else { /* Reprogram the advertisement register, even if it @@ -3195,7 +3243,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) { int current_link_up; - u32 bmsr, dummy; + u32 bmsr, val; u32 lcl_adv, rmt_adv; u16 current_speed; u8 current_duplex; @@ -3216,7 +3264,7 @@ udelay(80); } - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); /* Some third-party PHYs need to be reset on link going * down. @@ -3236,7 +3284,7 @@ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { tg3_readphy(tp, MII_BMSR, &bmsr); if (tg3_readphy(tp, MII_BMSR, &bmsr) || - !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) + !tg3_flag(tp, INIT_COMPLETE)) bmsr = 0; if (!(bmsr & BMSR_LSTATUS)) { @@ -3275,8 +3323,8 @@ } /* Clear pending interrupts... */ - tg3_readphy(tp, MII_TG3_ISTAT, &dummy); - tg3_readphy(tp, MII_TG3_ISTAT, &dummy); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); @@ -3297,13 +3345,13 @@ current_duplex = DUPLEX_INVALID; if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { - u32 val; - - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); - tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); - if (!(val & (1 << 10))) { - val |= (1 << 10); - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); goto relink; } } @@ -3373,13 +3421,11 @@ relink: if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - u32 tmp; - tg3_phy_copper_begin(tp); - tg3_readphy(tp, MII_BMSR, &tmp); - if (!tg3_readphy(tp, MII_BMSR, &tmp) && - (tmp & BMSR_LSTATUS)) + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) current_link_up = 1; } @@ -3422,7 +3468,7 @@ tg3_phy_eee_adjust(tp, current_link_up); - if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { + if (tg3_flag(tp, USE_LINKCHG_REG)) { /* Polled via timer. 
*/ tw32_f(MAC_EVENT, 0); } else { @@ -3433,8 +3479,7 @@ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && current_link_up == 1 && tp->link_config.active_speed == SPEED_1000 && - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || - (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { udelay(120); tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | @@ -3446,7 +3491,7 @@ } /* Prevent send BD corruption. */ - if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { + if (tg3_flag(tp, CLKREQ_BUG)) { u16 oldlnkctl, newlnkctl; pci_read_config_word(tp->pdev, @@ -3841,7 +3886,7 @@ int i; /* Reset when initting first time or we have a link. */ - if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && + if (tg3_flag(tp, INIT_COMPLETE) && !(mac_status & MAC_STATUS_PCS_SYNCED)) return; @@ -4102,9 +4147,9 @@ orig_active_speed = tp->link_config.active_speed; orig_active_duplex = tp->link_config.active_duplex; - if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && + if (!tg3_flag(tp, HW_AUTONEG) && netif_carrier_ok(tp->dev) && - (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { + tg3_flag(tp, INIT_COMPLETE)) { mac_status = tr32(MAC_STATUS); mac_status &= (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET | @@ -4135,7 +4180,7 @@ current_link_up = 0; mac_status = tr32(MAC_STATUS); - if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) + if (tg3_flag(tp, HW_AUTONEG)) current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); else current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); @@ -4334,7 +4379,7 @@ current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; - } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + } else if (!tg3_flag(tp, 5780_CLASS)) { /* Link is up via parallel detect */ } else { current_link_up = 0; @@ -4431,6 +4476,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) { + u32 val; int err; if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) @@ -4441,7 +4487,7 @@ err = tg3_setup_copper_phy(tp, force_reset); if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { - u32 val, scale; + u32 scale; val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) @@ -4456,19 +4502,22 @@ tw32(GRC_MISC_CFG, val); } + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + if (tp->link_config.active_speed == SPEED_1000 && tp->link_config.active_duplex == DUPLEX_HALF) - tw32(MAC_TX_LENGTHS, - ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); else - tw32(MAC_TX_LENGTHS, - ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { if (netif_carrier_ok(tp->dev)) { tw32(HOSTCC_STAT_COAL_TICKS, tp->coal.stats_block_coalesce_usecs); @@ -4477,8 +4526,8 @@ } } - if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { - u32 val = tr32(PCIE_PWR_MGMT_THRESH); + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); if (!netif_carrier_ok(tp->dev)) val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | tp->pwrmgmt_thresh; @@ -4490,6 +4539,128 @@ return err; } +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} + +static inline void 
tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) +{ + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); +} + +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) +{ + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); + tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +} + +static void tg3_dump_state(struct tg3 *tp) +{ + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + 
tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } +} + /* This is called whenever we suspect that the system chipset is re- * ordering the sequence of MMIO to the tx send mailbox. The symptom * is bogus tx completions. We try to recover by setting the @@ -4498,7 +4669,7 @@ */ static void tg3_tx_recover(struct tg3 *tp) { - BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || tp->write32_tx_mbox == tg3_write_indirect_mbox); netdev_warn(tp->dev, @@ -4508,7 +4679,7 @@ "and include system chipset information.\n"); spin_lock(&tp->lock); - tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; + tg3_flag_set(tp, TX_RECOVERY_PENDING); spin_unlock(&tp->lock); } @@ -4532,7 +4703,7 @@ struct netdev_queue *txq; int index = tnapi - tp->napi; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) index--; txq = netdev_get_tx_queue(tp->dev, index); @@ -4621,12 +4792,11 @@ u32 opaque_key, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *desc; - struct ring_info *map, *src_map; + struct ring_info *map; struct sk_buff *skb; dma_addr_t mapping; int skb_size, dest_idx; - src_map = NULL; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; @@ -4686,7 +4856,7 @@ struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; - struct tg3_rx_prodring_set *spr = &tp->prodring[0]; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; int dest_idx; switch (opaque_key) { @@ -4756,7 +4926,7 @@ u32 sw_idx = tnapi->rx_rcb_ptr; u16 hw_idx; int received; - struct tg3_rx_prodring_set *tpr = tnapi->prodring; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; hw_idx = *(tnapi->rx_rcb_prod_idx); /* @@ -4781,13 +4951,13 @@ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { - ri = &tp->prodring[0].rx_std_buffers[desc_idx]; + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; dma_addr = pci_unmap_addr(ri, mapping); skb = ri->skb; post_ptr = &std_prod_idx; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; dma_addr = pci_unmap_addr(ri, mapping); skb = ri->skb; post_ptr = &jmb_prod_idx; @@ -4850,13 +5020,13 @@ skb = copy_skb; } - if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && + if ((tg3_flag(tp, RX_CHECKSUMS)) && (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) >> RXD_TCPCSUM_SHIFT) == 0xffff)) skb->ip_summed = CHECKSUM_UNNECESSARY; else - skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, tp->dev); @@ -4922,7 +5092,7 @@ tw32_rx_mbox(tnapi->consmbox, sw_idx); /* Refill RX ring(s). 
*/ - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { + if (!tg3_flag(tp, ENABLE_RSS)) { if (work_mask & RXD_OPAQUE_RING_STD) { tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; @@ -4955,16 +5125,14 @@ static void tg3_poll_link(struct tg3 *tp) { /* handle link change and other phy events */ - if (!(tp->tg3_flags & - (TG3_FLAG_USE_LINKCHG_REG | - TG3_FLAG_POLL_SERDES))) { + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { struct tg3_hw_status *sblk = tp->napi[0].hw_status; if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | (sblk->status & ~SD_STATUS_LINK_CHG); spin_lock(&tp->lock); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED | @@ -5111,7 +5279,7 @@ /* run TX completion thread */ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { tg3_tx(tnapi); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) return work_done; } @@ -5122,15 +5290,15 @@ if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_done += tg3_rx(tnapi, budget - work_done); - if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { - struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; int i, err = 0; u32 std_prod_idx = dpr->rx_std_prod_idx; u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; for (i = 1; i < tp->irq_cnt; i++) err |= tg3_rx_prodring_xfer(tp, dpr, - tp->napi[i].prodring); + &tp->napi[i].prodring); wmb(); @@ -5161,7 +5329,7 @@ while (1) { work_done = tg3_poll_work(tnapi, work_done, budget); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) goto tx_recovery; if (unlikely(work_done >= budget)) @@ -5195,6 +5363,40 @@ return work_done; } +static void tg3_process_error(struct tg3 *tp) +{ + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + schedule_work(&tp->reset_task); +} + static int tg3_poll(struct napi_struct *napi, int budget) { struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); @@ -5203,17 +5405,20 @@ struct tg3_hw_status *sblk = tnapi->hw_status; while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + tg3_poll_link(tp); work_done = tg3_poll_work(tnapi, work_done, budget); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) goto tx_recovery; if (unlikely(work_done >= budget)) break; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + if (tg3_flag(tp, TAGGED_STATUS)) { /* tp->last_tag is used in tg3_int_reenable() below * to tell the hw how much work has been processed, * so we must read it before checking for more work. 
@@ -5240,6 +5445,59 @@ return work_done; } +static void tg3_napi_disable(struct tg3 *tp) +{ + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +} + +static void tg3_napi_enable(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); +} + +static void tg3_napi_init(struct tg3 *tp) +{ + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); +} + +static void tg3_napi_fini(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); +} + +static inline void tg3_netif_stop(struct tg3 *tp) +{ + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); +} + +static inline void tg3_netif_start(struct tg3 *tp) +{ + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); +} + static void tg3_irq_quiesce(struct tg3 *tp) { int i; @@ -5253,11 +5511,6 @@ synchronize_irq(tp->napi[i].irq_vec); } -static inline int tg3_irq_sync(struct tg3 *tp) -{ - return tp->irq_sync; -} - /* Fully shutdown all tg3 driver activity elsewhere in the system. * If irq_sync is non-zero, then the IRQ handler must be synchronized * with as well. Most of the time, this is not necessary except when @@ -5332,7 +5585,7 @@ * interrupt is ours and will flush the status block. */ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { - if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || + if (tg3_flag(tp, CHIP_RESETTING) || (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { handled = 0; goto out; @@ -5381,7 +5634,7 @@ * interrupt is ours and will flush the status block. 
*/ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { - if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || + if (tg3_flag(tp, CHIP_RESETTING) || (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { handled = 0; goto out; @@ -5494,14 +5747,14 @@ tg3_full_lock(tp, 1); - restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; - tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; + restart_timer = tg3_flag(tp, RESTART_TIMER); + tg3_flag_clear(tp, RESTART_TIMER); - if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { tp->write32_tx_mbox = tg3_write32_tx_mbox; tp->write32_rx_mbox = tg3_write_flush_reg32; - tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; - tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); } tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); @@ -5521,21 +5774,13 @@ tg3_phy_start(tp); } -static void tg3_dump_short_state(struct tg3 *tp) -{ - netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", - tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); - netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", - tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); -} - static void tg3_tx_timeout(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); if (netif_msg_tx_err(tp)) { netdev_err(dev, "transmit timed out, resetting\n"); - tg3_dump_short_state(tp); + tg3_dump_state(tp); } schedule_work(&tp->reset_task); @@ -5546,8 +5791,7 @@ { u32 base = (u32) mapping & 0xffffffff; - return ((base > 0xffffdcc0) && - (base + len + 8 < base)); + return (base > 0xffffdcc0) && (base + len + 8 < base); } /* Test for DMA addresses > 40-bit */ @@ -5555,15 +5799,36 @@ int len) { #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) - if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) - return (((u64) mapping + len) > DMA_BIT_MASK(40)); + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); return 0; #else return 0; #endif } -static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); +static void tg3_set_txd(struct tg3_napi *tnapi, int entry, + dma_addr_t mapping, int len, u32 flags, + u32 mss_and_is_end) +{ + struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; + int is_end = (mss_and_is_end & 0x1); + u32 mss = (mss_and_is_end >> 1); + u32 vlan_tag = 0; + + if (is_end) + flags |= TXD_FLAG_END; + if (flags & TXD_FLAG_VLAN) { + vlan_tag = flags >> 16; + flags &= 0xffff; + } + vlan_tag |= (mss << TXD_MSS_SHIFT); + + txd->addr_hi = ((u64) mapping >> 32); + txd->addr_lo = ((u64) mapping & 0xffffffff); + txd->len_flags = (len << TXD_LEN_SHIFT) | flags; + txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; +} /* Workaround 4GB and 40-bit hardware DMA bugs. */ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, @@ -5602,8 +5867,8 @@ /* Make sure new skb does not cross any 4G boundaries. * Drop the packet if it does. 
*/ - } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && - tg3_4g_overflow_test(new_addr, new_skb->len)) { + } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) && + tg3_4g_overflow_test(new_addr, new_skb->len)) { pci_unmap_single(tp->pdev, new_addr, new_skb->len, PCI_DMA_TODEVICE); ret = -1; @@ -5646,204 +5911,7 @@ return ret; } -static void tg3_set_txd(struct tg3_napi *tnapi, int entry, - dma_addr_t mapping, int len, u32 flags, - u32 mss_and_is_end) -{ - struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; - int is_end = (mss_and_is_end & 0x1); - u32 mss = (mss_and_is_end >> 1); - u32 vlan_tag = 0; - - if (is_end) - flags |= TXD_FLAG_END; - if (flags & TXD_FLAG_VLAN) { - vlan_tag = flags >> 16; - flags &= 0xffff; - } - vlan_tag |= (mss << TXD_MSS_SHIFT); - - txd->addr_hi = ((u64) mapping >> 32); - txd->addr_lo = ((u64) mapping & 0xffffffff); - txd->len_flags = (len << TXD_LEN_SHIFT) | flags; - txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; -} - -/* hard_start_xmit for devices that don't have any bugs and - * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. - */ -static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - u32 len, entry, base_flags, mss; - dma_addr_t mapping; - struct tg3_napi *tnapi; - struct netdev_queue *txq; - unsigned int i, last; - - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) - tnapi++; - - /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->napi.poll inside of a software - * interrupt. Furthermore, IRQ processing runs lockless so we have - * no IRQ context deadlocks to worry about either. Rejoice! - */ - if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); - - /* This is a hard error, log it. */ - netdev_err(dev, - "BUG! Tx Ring full when queue awake!\n"); - } - return NETDEV_TX_BUSY; - } - - entry = tnapi->tx_prod; - base_flags = 0; - mss = skb_shinfo(skb)->gso_size; - if (mss) { - int tcp_opt_len, ip_tcp_len; - u32 hdrlen; - - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - goto out_unlock; - } - - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - hdrlen = skb_headlen(skb) - ETH_HLEN; - else { - struct iphdr *iph = ip_hdr(skb); - - tcp_opt_len = tcp_optlen(skb); - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - - iph->check = 0; - iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); - hdrlen = ip_tcp_len + tcp_opt_len; - } - - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { - mss |= (hdrlen & 0xc) << 12; - if (hdrlen & 0x10) - base_flags |= 0x00000010; - base_flags |= (hdrlen & 0x3e0) << 5; - } else - mss |= hdrlen << 9; - - base_flags |= (TXD_FLAG_CPU_PRE_DMA | - TXD_FLAG_CPU_POST_DMA); - - tcp_hdr(skb)->check = 0; - - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - base_flags |= TXD_FLAG_TCPUDP_CSUM; - } - -#if TG3_VLAN_TAG_USED - if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) - base_flags |= (TXD_FLAG_VLAN | - (vlan_tx_tag_get(skb) << 16)); -#endif - - len = skb_headlen(skb); - - /* Queue skb data, a.k.a. the main skb fragment. 
*/ - mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) { - dev_kfree_skb(skb); - goto out_unlock; - } - - tnapi->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); - - if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && - !mss && skb->len > VLAN_ETH_FRAME_LEN) - base_flags |= TXD_FLAG_JMB_PKT; - - tg3_set_txd(tnapi, entry, mapping, len, base_flags, - (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); - - entry = NEXT_TX(entry); - - /* Now loop through additional data fragments, and queue them. */ - if (skb_shinfo(skb)->nr_frags > 0) { - last = skb_shinfo(skb)->nr_frags - 1; - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - len = frag->size; - mapping = pci_map_page(tp->pdev, - frag->page, - frag->page_offset, - len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) - goto dma_error; - - tnapi->tx_buffers[entry].skb = NULL; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, - mapping); - - tg3_set_txd(tnapi, entry, mapping, len, - base_flags, (i == last) | (mss << 1)); - - entry = NEXT_TX(entry); - } - } - - /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox(tnapi->prodmbox, entry); - - tnapi->tx_prod = entry; - if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { - netif_tx_stop_queue(txq); - - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) - netif_tx_wake_queue(txq); - } - -out_unlock: - mmiowb(); - - return NETDEV_TX_OK; - -dma_error: - last = i; - entry = tnapi->tx_prod; - tnapi->tx_buffers[entry].skb = NULL; - pci_unmap_single(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - entry = NEXT_TX(entry); - - pci_unmap_page(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], - mapping), - frag->size, PCI_DMA_TODEVICE); - } - - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, - struct net_device *); +static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); /* Use GSO to workaround a rare TSO bug that may be triggered when the * TSO header is greater than 80 bytes. @@ -5877,7 +5945,7 @@ nskb = segs; segs = segs->next; nskb->next = NULL; - tg3_start_xmit_dma_bug(nskb, tp->dev); + tg3_start_xmit(nskb, tp->dev); } while (segs); tg3_tso_bug_end: @@ -5887,10 +5955,9 @@ } /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and - * support TG3_FLG2_HW_TSO_1 or firmware TSO only. + * support TG3_FLAG_HW_TSO_1 or firmware TSO only. 
*/ -static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); u32 len, entry, base_flags, mss; @@ -5902,7 +5969,7 @@ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) tnapi++; /* We are running in BH disabled context with netif_tx_lock @@ -5940,7 +6007,7 @@ iph = ip_hdr(skb); tcp_opt_len = tcp_optlen(skb); - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + if (skb_is_gso_v6(skb)) { hdr_len = skb_headlen(skb) - ETH_HLEN; } else { u32 ip_tcp_len; @@ -5953,13 +6020,15 @@ } if (unlikely((ETH_HLEN + hdr_len) > 80) && - (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) + tg3_flag(tp, TSO_BUG)) return tg3_tso_bug(tp, skb); base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { tcp_hdr(skb)->check = 0; base_flags &= ~TXD_FLAG_TCPUDP_CSUM; } else @@ -5968,14 +6037,14 @@ IPPROTO_TCP, 0); - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { + if (tg3_flag(tp, HW_TSO_3)) { mss |= (hdr_len & 0xc) << 12; if (hdr_len & 0x10) base_flags |= 0x00000010; base_flags |= (hdr_len & 0x3e0) << 5; - } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) + } else if (tg3_flag(tp, HW_TSO_2)) mss |= hdr_len << 9; - else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || + else if (tg3_flag(tp, HW_TSO_1) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { if (tcp_opt_len || iph->ihl > 5) { int tsflags; @@ -5992,13 +6061,14 @@ } } } + #if TG3_VLAN_TAG_USED - if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) + if (vlan_tx_tag_present(skb)) base_flags |= (TXD_FLAG_VLAN | (vlan_tx_tag_get(skb) << 16)); #endif - if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && !mss && skb->len > VLAN_ETH_FRAME_LEN) base_flags |= TXD_FLAG_JMB_PKT; @@ -6015,18 +6085,18 @@ would_hit_hwbug = 0; - if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) would_hit_hwbug = 1; - if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && + if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) && tg3_4g_overflow_test(mapping, len)) would_hit_hwbug = 1; - if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && + if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) && tg3_40bit_overflow_test(tp, mapping, len)) would_hit_hwbug = 1; - if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) + if (tg3_flag(tp, 5701_DMA_BUG)) would_hit_hwbug = 1; tg3_set_txd(tnapi, entry, mapping, len, base_flags, @@ -6052,19 +6122,21 @@ if (pci_dma_mapping_error(tp->pdev, mapping)) goto dma_error; - if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) would_hit_hwbug = 1; - if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && + if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) && tg3_4g_overflow_test(mapping, len)) would_hit_hwbug = 1; - if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && + if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) && tg3_40bit_overflow_test(tp, mapping, len)) would_hit_hwbug = 1; - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) tg3_set_txd(tnapi, entry, mapping, len, base_flags, (i == last)|(mss << 1)); else @@ -6142,16 +6214,16 @@ dev->mtu = new_mtu; if (new_mtu > ETH_DATA_LEN) { - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { - 
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_clear(tp, TSO_CAPABLE); ethtool_op_set_tso(dev, 0); } else { - tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; + tg3_flag_set(tp, JUMBO_RING_ENABLE); } } else { - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; - tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; + if (tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, TSO_CAPABLE); + tg3_flag_clear(tp, JUMBO_RING_ENABLE); } } @@ -6199,13 +6271,13 @@ { int i; - if (tpr != &tp->prodring[0]) { + if (tpr != &tp->napi[0].prodring) { for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; i = (i + 1) & tp->rx_std_ring_mask) tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + if (tg3_flag(tp, JUMBO_CAPABLE)) { for (i = tpr->rx_jmb_cons_idx; i != tpr->rx_jmb_prod_idx; i = (i + 1) & tp->rx_jmb_ring_mask) { @@ -6221,7 +6293,7 @@ tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { for (i = 0; i <= tp->rx_jmb_ring_mask; i++) tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); @@ -6245,10 +6317,10 @@ tpr->rx_jmb_cons_idx = 0; tpr->rx_jmb_prod_idx = 0; - if (tpr != &tp->prodring[0]) { + if (tpr != &tp->napi[0].prodring) { memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE(tp)); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) + if (tpr->rx_jmb_buffers) memset(&tpr->rx_jmb_buffers[0], 0, TG3_RX_JMB_BUFF_RING_SIZE(tp)); goto done; @@ -6258,7 +6330,7 @@ memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; - if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && + if (tg3_flag(tp, 5780_CLASS) && tp->dev->mtu > ETH_DATA_LEN) rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); @@ -6291,12 +6363,12 @@ } } - if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) goto done; memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); - if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) goto done; for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { @@ -6339,13 +6411,13 @@ kfree(tpr->rx_jmb_buffers); tpr->rx_jmb_buffers = NULL; if (tpr->rx_std) { - pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), - tpr->rx_std, tpr->rx_std_mapping); + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); tpr->rx_std = NULL; } if (tpr->rx_jmb) { - pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp), - tpr->rx_jmb, tpr->rx_jmb_mapping); + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); tpr->rx_jmb = NULL; } } @@ -6358,20 +6430,23 @@ if (!tpr->rx_std_buffers) return -ENOMEM; - tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), - &tpr->rx_std_mapping); + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); if (!tpr->rx_std) goto err_out; - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), GFP_KERNEL); if (!tpr->rx_jmb_buffers) goto err_out; - tpr->rx_jmb = pci_alloc_consistent(tp->pdev, - TG3_RX_JMB_RING_BYTES(tp), - &tpr->rx_jmb_mapping); + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + 
&tpr->rx_jmb_mapping, + GFP_KERNEL); if (!tpr->rx_jmb) goto err_out; } @@ -6397,7 +6472,7 @@ for (j = 0; j < tp->irq_cnt; j++) { struct tg3_napi *tnapi = &tp->napi[j]; - tg3_rx_prodring_free(tp, &tp->prodring[j]); + tg3_rx_prodring_free(tp, &tnapi->prodring); if (!tnapi->tx_buffers) continue; @@ -6469,7 +6544,7 @@ if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { + if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { tg3_free_rings(tp); return -ENOMEM; } @@ -6490,7 +6565,7 @@ struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->tx_ring) { - pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, + dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, tnapi->tx_ring, tnapi->tx_desc_mapping); tnapi->tx_ring = NULL; } @@ -6499,28 +6574,28 @@ tnapi->tx_buffers = NULL; if (tnapi->rx_rcb) { - pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), - tnapi->rx_rcb, - tnapi->rx_rcb_mapping); + dma_free_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); tnapi->rx_rcb = NULL; } + tg3_rx_prodring_fini(tp, &tnapi->prodring); + if (tnapi->hw_status) { - pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, - tnapi->hw_status, - tnapi->status_mapping); + dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); tnapi->hw_status = NULL; } } if (tp->hw_stats) { - pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), - tp->hw_stats, tp->stats_mapping); + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } - - for (i = 0; i < tp->irq_cnt; i++) - tg3_rx_prodring_fini(tp, &tp->prodring[i]); } /* @@ -6531,14 +6606,10 @@ { int i; - for (i = 0; i < tp->irq_cnt; i++) { - if (tg3_rx_prodring_init(tp, &tp->prodring[i])) - goto err_out; - } - - tp->hw_stats = pci_alloc_consistent(tp->pdev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping); + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, + GFP_KERNEL); if (!tp->hw_stats) goto err_out; @@ -6548,29 +6619,34 @@ struct tg3_napi *tnapi = &tp->napi[i]; struct tg3_hw_status *sblk; - tnapi->hw_status = pci_alloc_consistent(tp->pdev, - TG3_HW_STATUS_SIZE, - &tnapi->status_mapping); + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); if (!tnapi->hw_status) goto err_out; memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); sblk = tnapi->hw_status; + if (tg3_rx_prodring_init(tp, &tnapi->prodring)) + goto err_out; + /* If multivector TSS is enabled, vector 0 does not handle * tx interrupts. Don't allocate any resources for it. */ - if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || - (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { + if ((!i && !tg3_flag(tp, ENABLE_TSS)) || + (i && tg3_flag(tp, ENABLE_TSS))) { tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * TG3_TX_RING_SIZE, GFP_KERNEL); if (!tnapi->tx_buffers) goto err_out; - tnapi->tx_ring = pci_alloc_consistent(tp->pdev, - TG3_TX_RING_BYTES, - &tnapi->tx_desc_mapping); + tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping, + GFP_KERNEL); if (!tnapi->tx_ring) goto err_out; } @@ -6596,18 +6672,17 @@ break; } - tnapi->prodring = &tp->prodring[i]; - /* * If multivector RSS is enabled, vector 0 does not handle * rx or tx interrupts. Don't allocate any resources for it. 
*/ - if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) + if (!i && tg3_flag(tp, ENABLE_RSS)) continue; - tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, - TG3_RX_RCB_RING_BYTES(tp), - &tnapi->rx_rcb_mapping); + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); if (!tnapi->rx_rcb) goto err_out; @@ -6631,7 +6706,7 @@ unsigned int i; u32 val; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { switch (ofs) { case RCVLSC_MODE: case DMAC_MODE: @@ -6740,6 +6815,10 @@ int i; u32 apedata; + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); if (apedata != APE_SEG_SIG_MAGIC) return; @@ -6776,7 +6855,7 @@ u32 event; u32 apedata; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + if (!tg3_flag(tp, ENABLE_APE)) return; switch (kind) { @@ -6791,6 +6870,8 @@ APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); event = APE_EVENT_STATUS_STATE_START; break; @@ -6802,6 +6883,16 @@ */ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + event = APE_EVENT_STATUS_STATE_UNLOAD; break; case RESET_KIND_SUSPEND: @@ -6822,7 +6913,7 @@ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, NIC_SRAM_FIRMWARE_MBOX_MAGIC1); - if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { switch (kind) { case RESET_KIND_INIT: tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, @@ -6852,7 +6943,7 @@ /* tp->lock is held. */ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) { - if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { switch (kind) { case RESET_KIND_INIT: tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, @@ -6876,7 +6967,7 @@ /* tp->lock is held. */ static void tg3_write_sig_legacy(struct tg3 *tp, int kind) { - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if (tg3_flag(tp, ENABLE_ASF)) { switch (kind) { case RESET_KIND_INIT: tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, @@ -6927,9 +7018,8 @@ * of the above loop as an error, but do report the lack of * running firmware once. */ - if (i >= 100000 && - !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { - tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); netdev_info(tp->dev, "No firmware running\n"); } @@ -6962,10 +7052,10 @@ /* Set MAX PCI retry to zero. */ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) + tg3_flag(tp, PCIX_MODE)) val |= PCISTATE_RETRY_SAME_DMA; /* Allow reads and writes to the APE register and memory space. 
*/ - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + if (tg3_flag(tp, ENABLE_APE)) val |= PCISTATE_ALLOW_APE_CTLSPC_WR | PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; @@ -6974,7 +7064,7 @@ pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) + if (tg3_flag(tp, PCI_EXPRESS)) pcie_set_readrq(tp->pdev, tp->pcie_readrq); else { pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, @@ -6985,7 +7075,7 @@ } /* Make sure PCI-X relaxed ordering bit is clear. */ - if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + if (tg3_flag(tp, PCIX_MODE)) { u16 pcix_cmd; pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, @@ -6995,12 +7085,12 @@ pcix_cmd); } - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { + if (tg3_flag(tp, 5780_CLASS)) { /* Chip reset on 5780 will reset MSI enable bit, * so need to restore it. */ - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + if (tg3_flag(tp, USING_MSI)) { u16 ctrl; pci_read_config_word(tp->pdev, @@ -7040,7 +7130,7 @@ tg3_save_pci_state(tp); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) + tg3_flag(tp, 5755_PLUS)) tw32(GRC_FASTBOOT_PC, 0); /* @@ -7059,7 +7149,7 @@ * at this time, but the irq handler may still be called due to irq * sharing or irqpoll. */ - tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; + tg3_flag_set(tp, CHIP_RESETTING); for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) { @@ -7082,10 +7172,10 @@ /* do the reset */ val = GRC_MISC_CFG_CORECLK_RESET; - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if (tg3_flag(tp, PCI_EXPRESS)) { /* Force PCIe 1.0a mode */ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && + !tg3_flag(tp, 57765_PLUS) && tr32(TG3_PCIE_PHY_TSTCTL) == (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); @@ -7103,8 +7193,7 @@ } /* Manage gphy power for all CPMU absent PCIe devices. */ - if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) val |= GRC_MISC_CFG_KEEP_GPHY_POWER; tw32(GRC_MISC_CFG, val); @@ -7137,7 +7226,7 @@ udelay(120); - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { + if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) { u16 val16; if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { @@ -7163,7 +7252,7 @@ * Older PCIe devices only support the 128 byte * MPS setting. Enforce the restriction. 
*/ - if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) + if (!tg3_flag(tp, CPMU_PRESENT)) val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; pci_write_config_word(tp->pdev, tp->pcie_cap + PCI_EXP_DEVCTL, @@ -7182,10 +7271,11 @@ tg3_restore_pci_state(tp); - tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); val = 0; - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) + if (tg3_flag(tp, 5780_CLASS)) val = tr32(MEMARB_MODE); tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); @@ -7210,19 +7300,21 @@ tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - tp->mac_mode = MAC_MODE_PORT_MODE_TBI; - tw32_f(MAC_MODE, tp->mac_mode); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + val = tp->mac_mode; } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { - tp->mac_mode = MAC_MODE_PORT_MODE_GMII; - tw32_f(MAC_MODE, tp->mac_mode); - } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); - if (tp->mac_mode & MAC_MODE_APE_TX_EN) - tp->mac_mode |= MAC_MODE_TDE_ENABLE; - tw32_f(MAC_MODE, tp->mac_mode); + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + val = tp->mac_mode; } else - tw32_f(MAC_MODE, 0); + val = 0; + + tw32_f(MAC_MODE, val); udelay(40); tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); @@ -7233,28 +7325,33 @@ tg3_mdio_start(tp); - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && + if (tg3_flag(tp, PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { + !tg3_flag(tp, 57765_PLUS)) { val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + } + /* Reprobe ASF enable state. */ - tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; - tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; + tg3_flag_clear(tp, ENABLE_ASF); + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); if (val == NIC_SRAM_DATA_SIG_MAGIC) { u32 nic_cfg; tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { - tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; + tg3_flag_set(tp, ENABLE_ASF); tp->last_event_jiffies = jiffies; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) - tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); } } @@ -7264,8 +7361,7 @@ /* tp->lock is held. */ static void tg3_stop_fw(struct tg3 *tp) { - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { /* Wait for RX cpu to ACK the previous event. */ tg3_wait_for_event_ack(tp); @@ -7311,8 +7407,7 @@ { int i; - BUG_ON(offset == TX_CPU_BASE && - (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { u32 val = tr32(GRC_VCPU_EXT_CTRL); @@ -7347,7 +7442,7 @@ } /* Clear firmware's nvram arbitration. 
*/ - if (tp->tg3_flags & TG3_FLAG_NVRAM) + if (tg3_flag(tp, NVRAM)) tw32(NVRAM_SWARB, SWARB_REQ_CLR0); return 0; } @@ -7365,15 +7460,14 @@ int err, lock_err, i; void (*write_op)(struct tg3 *, u32, u32); - if (cpu_base == TX_CPU_BASE && - (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n", __func__); return -EINVAL; } - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + if (tg3_flag(tp, 5705_PLUS)) write_op = tg3_write_mem; else write_op = tg3_write_indirect_reg32; @@ -7459,8 +7553,6 @@ return 0; } -/* 5705 needs a special version of the TSO firmware. */ - /* tp->lock is held. */ static int tg3_load_tso_firmware(struct tg3 *tp) { @@ -7469,7 +7561,9 @@ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; int err, i; - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) return 0; fw_data = (void *)tp->fw->data; @@ -7538,7 +7632,7 @@ if (!netif_running(dev)) return 0; - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if (tg3_flag(tp, ENABLE_ASF)) { u32 addr0_high, addr0_low, addr1_high, addr1_low; addr0_high = tr32(MAC_ADDR_0_HIGH); @@ -7573,7 +7667,7 @@ (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), maxlen_flags); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_NIC_ADDR), nic_addr); @@ -7584,7 +7678,7 @@ { int i; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { + if (!tg3_flag(tp, ENABLE_TSS)) { tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); @@ -7594,7 +7688,7 @@ tw32(HOSTCC_TXCOAL_MAXF_INT, 0); } - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { + if (!tg3_flag(tp, ENABLE_RSS)) { tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); @@ -7604,7 +7698,7 @@ tw32(HOSTCC_RXCOAL_MAXF_INT, 0); } - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { u32 val = ec->stats_block_coalesce_usecs; tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); @@ -7626,7 +7720,7 @@ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; tw32(reg, ec->rx_max_coalesced_frames_irq); - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { + if (tg3_flag(tp, ENABLE_TSS)) { reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; tw32(reg, ec->tx_coalesce_usecs); reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; @@ -7641,7 +7735,7 @@ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { + if (tg3_flag(tp, ENABLE_TSS)) { tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); @@ -7657,8 +7751,10 @@ struct tg3_napi *tnapi = &tp->napi[0]; /* Disable all transmit rings but the first. */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else @@ -7671,10 +7767,9 @@ /* Disable all receive return rings but the first. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; - else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + else if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) @@ -7689,18 +7784,24 @@ /* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[0].last_rx_cons = 0; + tp->napi[0].last_tx_cons = 0; /* Zero mailbox registers. */ - if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { - for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { + if (tg3_flag(tp, SUPPORT_MSIX)) { + for (i = 1; i < tp->irq_max; i++) { tp->napi[i].tx_prod = 0; tp->napi[i].tx_cons = 0; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) tw32_mailbox(tp->napi[i].prodmbox, 0); tw32_rx_mbox(tp->napi[i].consmbox, 0); tw32_mailbox_f(tp->napi[i].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[i].last_rx_cons = 0; + tp->napi[i].last_tx_cons = 0; } - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) + if (!tg3_flag(tp, ENABLE_TSS)) tw32_mailbox(tp->napi[0].prodmbox, 0); } else { tp->napi[0].tx_prod = 0; @@ -7710,7 +7811,7 @@ } /* Make sure the NIC-based send BD rings are disabled. */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; for (i = 0; i < 16; i++) tw32_tx_mbox(mbox + i * 8, 0); @@ -7770,12 +7871,53 @@ } } +static void tg3_setup_rxbd_thresholds(struct tg3 *tp) +{ + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; + + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + else + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); +} + /* tp->lock is held. 
*/ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) { u32 val, rdmac_mode; int i, err, limit; - struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; tg3_disable_ints(tp); @@ -7783,7 +7925,7 @@ tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); - if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) + if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1); /* Enable MAC control of LPI */ @@ -7803,7 +7945,7 @@ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + if (tg3_flag(tp, ENABLE_APE)) val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; tw32_f(TG3_CPMU_EEE_MODE, val); @@ -7813,7 +7955,7 @@ TG3_CPMU_DBTMR1_LNKIDLE_2047US); tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR1_APE_TX_2047US | + TG3_CPMU_DBTMR2_APE_TX_2047US | TG3_CPMU_DBTMR2_TXIDXEQ_2047US); } @@ -7862,7 +8004,7 @@ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); } - if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) { + if (tg3_flag(tp, L1PLLPD_EN)) { u32 grc_mode = tr32(GRC_MODE); /* Access the lower 1K of PL PCIE block registers. */ @@ -7892,6 +8034,22 @@ tw32(GRC_MODE, grc_mode); } + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_DL_LO_FTSMAX); + val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, + val | TG3_PCIE_DL_LO_FTSMAX_VAL); + + tw32(GRC_MODE, grc_mode); + } + val = tr32(TG3_CPMU_LSPD_10MB_CLK); val &= ~CPMU_LSPD_10MB_MACCLK_MASK; val |= CPMU_LSPD_10MB_MACCLK_6_25; @@ -7903,20 +8061,20 @@ * other revision. But do not set this on PCI Express * chips and don't even touch the clocks if the CPMU is present. */ - if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { - if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + if (!tg3_flag(tp, CPMU_PRESENT)) { + if (!tg3_flag(tp, PCI_EXPRESS)) tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { + tg3_flag(tp, PCIX_MODE)) { val = tr32(TG3PCI_PCISTATE); val |= PCISTATE_RETRY_SAME_DMA; tw32(TG3PCI_PCISTATE, val); } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. */ @@ -7943,11 +8101,14 @@ if (err) return err; - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { + if (tg3_flag(tp, 57765_PLUS)) { val = tr32(TG3PCI_DMA_RW_CTRL) & ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= DMA_RWCTRL_TAGGED_STAT_WA; tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { @@ -7982,7 +8143,7 @@ tw32(GRC_MISC_CFG, val); /* Initialize MBUF/DESC pool. */ - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { + if (tg3_flag(tp, 5750_PLUS)) { /* Do nothing. 
*/ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); @@ -7992,7 +8153,7 @@ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); - } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { + } else if (tg3_flag(tp, TSO_CAPABLE)) { int fw_len; fw_len = tp->fw_len; @@ -8026,6 +8187,10 @@ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) val |= BUFMGR_MODE_NO_TX_UNDERRUN; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) + val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; tw32(BUFMGR_MODE, val); for (i = 0; i < 2000; i++) { if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) @@ -8037,21 +8202,10 @@ return -ENODEV; } - /* Setup replenish threshold. */ - val = tp->rx_pending / 8; - if (val == 0) - val = 1; - else if (val > tp->rx_std_max_post) - val = tp->rx_std_max_post; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) - tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); - - if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) - val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; - } + if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); - tw32(RCVBDI_STD_THRESH, val); + tg3_setup_rxbd_thresholds(tp); /* Initialize TG3_BDINFO's at: * RCVDBDI_STD_BD: standard eth size rx ring @@ -8074,33 +8228,31 @@ ((u64) tpr->rx_std_mapping >> 32)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tpr->rx_std_mapping & 0xffffffff)); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) + if (!tg3_flag(tp, 5717_PLUS)) tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC); /* Disable the mini ring */ - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); /* Program the jumbo buffer descriptor ring control * blocks on those devices that have them. */ - if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { - /* Setup replenish threshold. 
*/ - tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, ((u64) tpr->rx_jmb_mapping >> 32)); tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, - (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | - BDINFO_FLAGS_USE_EXT_RECV); - if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) || + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); @@ -8109,32 +8261,27 @@ BDINFO_FLAGS_DISABLED); } - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { + if (tg3_flag(tp, 57765_PLUS)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - val = RX_STD_MAX_SIZE_5705; + val = TG3_RX_STD_MAX_SIZE_5700; else - val = RX_STD_MAX_SIZE_5717; + val = TG3_RX_STD_MAX_SIZE_5717; val <<= BDINFO_FLAGS_MAXLEN_SHIFT; val |= (TG3_RX_STD_DMA_SZ << 2); } else val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; } else - val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); tpr->rx_std_prod_idx = tp->rx_pending; tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); - tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? - tp->rx_jumbo_pending : 0; + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { - tw32(STD_REPLENISH_LWM, 32); - tw32(JMB_REPLENISH_LWM, 16); - } - tg3_rings_reset(tp); /* Initialize MAC address and backoff seed. */ @@ -8147,10 +8294,16 @@ /* The slot time is changed by tg3_setup_phy if we * run at gigabit with half duplex. */ - tw32(MAC_TX_LENGTHS, - (2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); /* Receive rules. 
*/ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); @@ -8175,45 +8328,54 @@ RDMAC_MODE_MBUF_RBD_CRPT_ENAB | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; - /* If statement applies to 5705 and 5750 PCI devices only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { - if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { + !tg3_flag(tp, IS_5788)) { rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; } } - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) + if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + if (tg3_flag(tp, 57765_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { + tg3_flag(tp, 57765_PLUS)) { val = tr32(TG3_RDMA_RSRVCTRL_REG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { - val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; - val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; } tw32(TG3_RDMA_RSRVCTRL_REG, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | @@ -8221,12 +8383,12 @@ } /* Receive/send statistics. */ - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { + if (tg3_flag(tp, 5750_PLUS)) { val = tr32(RCVLPC_STATS_ENABLE); val &= ~RCVLPC_STATSENAB_DACK_FIX; tw32(RCVLPC_STATS_ENABLE, val); } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && - (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + tg3_flag(tp, TSO_CAPABLE)) { val = tr32(RCVLPC_STATS_ENABLE); val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; tw32(RCVLPC_STATS_ENABLE, val); @@ -8249,7 +8411,7 @@ __tg3_set_coalesce(tp, &tp->coal); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { /* Status/statistics block address. See tg3_timer, * the tg3_periodic_fetch_stats call there, and * tg3_get_stats to see how this works for 5705/5750 chips. 
@@ -8275,7 +8437,7 @@ tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { @@ -8285,13 +8447,13 @@ udelay(10); } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) - tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; else tp->mac_mode = 0; tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + if (!tg3_flag(tp, 5705_PLUS) && !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) tp->mac_mode |= MAC_MODE_LINK_POLARITY; @@ -8299,12 +8461,12 @@ udelay(40); /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). - * If TG3_FLG2_IS_NIC is zero, we should read the + * If TG3_FLAG_IS_NIC is zero, we should read the * register to preserve the GPIO settings for LOMs. The GPIOs, * whether used as inputs or outputs, are set by boot code after * reset. */ - if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { + if (!tg3_flag(tp, IS_NIC)) { u32 gpio_mask; gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | @@ -8322,20 +8484,20 @@ tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; /* GPIO1 must be driven high for eeprom write protect */ - if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) + if (tg3_flag(tp, EEPROM_WRITE_PROT)) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1); } tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { val = tr32(MSGINT_MODE); val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; tw32(MSGINT_MODE, val); } - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); udelay(40); } @@ -8346,23 +8508,20 @@ WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | WDMAC_MODE_LNGREAD_ENAB); - /* If statement applies to 5705 and 5750 PCI devices only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { - if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { /* nothing */ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + !tg3_flag(tp, IS_5788)) { val |= WDMAC_MODE_RX_ACCEL; } } /* Enable host coalescing bug fix */ - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + if (tg3_flag(tp, 5755_PLUS)) val |= WDMAC_MODE_STATUS_TAG_FIX; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) @@ -8371,7 +8530,7 @@ tw32_f(WDMAC_MODE, val); udelay(40); - if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + if (tg3_flag(tp, PCIX_MODE)) { u16 pcix_cmd; pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, @@ -8391,7 +8550,7 @@ udelay(40); tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tw32(MBFREE_MODE, 
MBFREE_MODE_ENABLE); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) @@ -8403,15 +8562,16 @@ tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_flag(tp, LRG_PROD_RING_CAP)) val |= RCVDBDI_MODE_LRG_RING_SZ; tw32(RCVDBDI_MODE, val); tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_TSS)) val |= SNDBDI_MODE_MULTI_TXQ_EN; tw32(SNDBDI_MODE, val); tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); @@ -8422,20 +8582,28 @@ return err; } - if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { + if (tg3_flag(tp, TSO_CAPABLE)) { err = tg3_load_tso_firmware(tp); if (err) return err; } tp->tx_mode = TX_MODE_ENABLE; - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + + if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + tw32_f(MAC_TX_MODE, tp->tx_mode); udelay(100); - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { + if (tg3_flag(tp, ENABLE_RSS)) { u32 reg = MAC_RSS_INDIR_TBL_0; u8 *ent = (u8 *)&val; @@ -8464,10 +8632,10 @@ } tp->rx_mode = RX_MODE_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + if (tg3_flag(tp, 5755_PLUS)) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) + if (tg3_flag(tp, ENABLE_RSS)) tp->rx_mode |= RX_MODE_RSS_ENABLE | RX_MODE_RSS_ITBL_HASH_BITS_7 | RX_MODE_RSS_IPV6_HASH_EN | @@ -8514,11 +8682,11 @@ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { /* Use hardware link auto-negotiation */ - tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; + tg3_flag_set(tp, HW_AUTONEG); } if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { u32 tmp; tmp = tr32(SERDES_RX_CTRL); @@ -8528,7 +8696,7 @@ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); } - if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { + if (!tg3_flag(tp, USE_PHYLIB)) { if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; tp->link_config.speed = tp->link_config.orig_speed; @@ -8561,12 +8729,11 @@ tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); - if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) limit = 8; else limit = 16; - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) + if (tg3_flag(tp, ENABLE_ASF)) limit -= 4; switch (limit) { case 16: @@ -8604,7 +8771,7 @@ break; } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + if (tg3_flag(tp, ENABLE_APE)) /* Write our heartbeat update interval to APE. 
*/ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, APE_HOST_HEARTBEAT_INT_DISABLE); @@ -8670,10 +8837,48 @@ TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); - TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + } else { + u32 val = tr32(HOSTCC_FLOW_ATTN); + val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); } +static void tg3_chk_missed_msi(struct tg3 *tp) +{ + u32 i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_has_work(tnapi)) { + if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && + tnapi->last_tx_cons == tnapi->tx_cons) { + if (tnapi->chk_msi_cnt < 1) { + tnapi->chk_msi_cnt++; + return; + } + tw32_mailbox(tnapi->int_mbox, + tnapi->last_tag << 24); + } + } + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = tnapi->rx_rcb_ptr; + tnapi->last_tx_cons = tnapi->tx_cons; + } +} + static void tg3_timer(unsigned long __opaque) { struct tg3 *tp = (struct tg3 *) __opaque; @@ -8683,7 +8888,11 @@ spin_lock(&tp->lock); - if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_chk_missed_msi(tp); + + if (!tg3_flag(tp, TAGGED_STATUS)) { /* All of this garbage is because when using non-tagged * IRQ status the mailbox/status_block protocol the chip * uses with the cpu is race prone. @@ -8697,7 +8906,7 @@ } if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; + tg3_flag_set(tp, RESTART_TIMER); spin_unlock(&tp->lock); schedule_work(&tp->reset_task); return; @@ -8706,16 +8915,13 @@ /* This part only runs once per second. */ if (!--tp->timer_counter) { - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + if (tg3_flag(tp, 5705_PLUS)) tg3_periodic_fetch_stats(tp); - if (tp->setlpicnt && !--tp->setlpicnt) { - u32 val = tr32(TG3_CPMU_EEE_MODE); - tw32(TG3_CPMU_EEE_MODE, - val | TG3_CPMU_EEEMD_LPI_ENABLE); - } + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); - if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { + if (tg3_flag(tp, USE_LINKCHG_REG)) { u32 mac_stat; int phy_event; @@ -8730,7 +8936,7 @@ if (phy_event) tg3_setup_phy(tp, 0); - } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { + } else if (tg3_flag(tp, POLL_SERDES)) { u32 mac_stat = tr32(MAC_STATUS); int need_setup = 0; @@ -8755,7 +8961,7 @@ tg3_setup_phy(tp, 0); } } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + tg3_flag(tp, 5780_CLASS)) { tg3_serdes_parallel_detect(tp); } @@ -8780,8 +8986,7 @@ * resets. 
*/ if (!--tp->asf_counter) { - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, @@ -8817,16 +9022,16 @@ name[IFNAMSIZ-1] = 0; } - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { fn = tg3_msi; - if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + if (tg3_flag(tp, 1SHOT_MSI)) fn = tg3_msi_1shot; - flags = IRQF_SAMPLE_RANDOM; + flags = 0; } else { fn = tg3_interrupt; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + if (tg3_flag(tp, TAGGED_STATUS)) fn = tg3_interrupt_tagged; - flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; + flags = IRQF_SHARED; } return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); @@ -8850,8 +9055,7 @@ * Turn off MSI one shot mode. Otherwise this test has no * observable way to know whether the interrupt was delivered. */ - if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } @@ -8893,8 +9097,7 @@ if (intr_ok) { /* Reenable MSI one shot mode. */ - if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } @@ -8912,7 +9115,7 @@ int err; u16 pci_cmd; - if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) + if (!tg3_flag(tp, USING_MSI)) return 0; /* Turn off SERR reporting in case MSI terminates with Master @@ -8942,7 +9145,7 @@ pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + tg3_flag_clear(tp, USING_MSI); tp->napi[0].irq_vec = tp->pdev->irq; err = tg3_request_irq(tp, 0); @@ -9031,11 +9234,13 @@ for (i = 0; i < tp->irq_max; i++) tp->napi[i].irq_vec = msix_ent[i].vector; - tp->dev->real_num_tx_queues = 1; + netif_set_real_num_tx_queues(tp->dev, 1); if (tp->irq_cnt > 1) { - tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { - tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; + tg3_flag_set(tp, ENABLE_RSS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_flag_set(tp, ENABLE_TSS); netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); } } @@ -9045,8 +9250,8 @@ static void tg3_ints_init(struct tg3 *tp) { - if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && - !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && + !tg3_flag(tp, TAGGED_STATUS)) { /* All MSI supporting chips should support tagged * status. Assert that this is the case. 
*/ @@ -9055,34 +9260,35 @@ goto defcfg; } - if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) - tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; - else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && - pci_enable_msi(tp->pdev) == 0) - tp->tg3_flags2 |= TG3_FLG2_USING_MSI; + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { u32 msi_mode = tr32(MSGINT_MODE); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) msi_mode |= MSGINT_MODE_MULTIVEC_EN; tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); } defcfg: - if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { + if (!tg3_flag(tp, USING_MSIX)) { tp->irq_cnt = 1; tp->napi[0].irq_vec = tp->pdev->irq; - tp->dev->real_num_tx_queues = 1; + netif_set_real_num_tx_queues(tp->dev, 1); } } static void tg3_ints_fini(struct tg3 *tp) { - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + if (tg3_flag(tp, USING_MSIX)) pci_disable_msix(tp->pdev); - else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) + else if (tg3_flag(tp, USING_MSI)) pci_disable_msi(tp->pdev); - tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; - tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); } static int tg3_open(struct net_device *dev) @@ -9097,23 +9303,23 @@ return err; } else if (err) { netdev_warn(tp->dev, "TSO capability disabled\n"); - tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; - } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { netdev_notice(tp->dev, "TSO capability restored\n"); - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; + tg3_flag_set(tp, TSO_CAPABLE); } } netif_carrier_off(tp->dev); - err = tg3_set_power_state(tp, PCI_D0); + err = tg3_power_up(tp); if (err) return err; tg3_full_lock(tp, 0); tg3_disable_ints(tp); - tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; + tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); @@ -9130,6 +9336,8 @@ if (err) goto err_out1; + tg3_napi_init(tp); + tg3_napi_enable(tp); for (i = 0; i < tp->irq_cnt; i++) { @@ -9152,7 +9360,9 @@ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); } else { - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + if (tg3_flag(tp, TAGGED_STATUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) tp->timer_offset = HZ; else tp->timer_offset = HZ / 10; @@ -9174,7 +9384,7 @@ if (err) goto err_out3; - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + if (tg3_flag(tp, USING_MSI)) { err = tg3_test_msi(tp); if (err) { @@ -9186,8 +9396,7 @@ goto err_out2; } - if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { + if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { u32 val = tr32(PCIE_TRANSACTION_CFG); tw32(PCIE_TRANSACTION_CFG, @@ -9200,7 +9409,7 @@ tg3_full_lock(tp, 0); add_timer(&tp->timer); - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); tg3_enable_ints(tp); tg3_full_unlock(tp); @@ -9217,6 +9426,7 @@ err_out2: tg3_napi_disable(tp); + tg3_napi_fini(tp); tg3_free_consistent(tp); err_out1: @@ -9247,7 +9457,7 @@ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); - tp->tg3_flags &= 
~TG3_FLAG_INIT_COMPLETE; + tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); @@ -9263,9 +9473,11 @@ memcpy(&tp->estats_prev, tg3_get_estats(tp), sizeof(tp->estats_prev)); + tg3_napi_fini(tp); + tg3_free_consistent(tp); - tg3_set_power_state(tp, PCI_D3hot); + tg3_power_down(tp); netif_carrier_off(tp->dev); @@ -9404,6 +9616,8 @@ ESTAT_ADD(nic_avoided_irqs); ESTAT_ADD(nic_tx_threshold_hit); + ESTAT_ADD(mbuf_lwm_thresh_hit); + return estats; } @@ -9513,13 +9727,13 @@ */ #if TG3_VLAN_TAG_USED if (!tp->vlgrp && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + !tg3_flag(tp, ENABLE_ASF)) rx_mode |= RX_MODE_KEEP_VLAN_TAG; #else /* By definition, VLAN is disabled always in this * case. */ - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!tg3_flag(tp, ENABLE_ASF)) rx_mode |= RX_MODE_KEEP_VLAN_TAG; #endif @@ -9573,82 +9787,26 @@ tg3_full_unlock(tp); } -#define TG3_REGDUMP_LEN (32 * 1024) - static int tg3_get_regs_len(struct net_device *dev) { - return TG3_REGDUMP_LEN; + return TG3_REG_BLK_SIZE; } static void tg3_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { - u32 *p = _p; struct tg3 *tp = netdev_priv(dev); - u8 *orig_p = _p; - int i; regs->version = 0; - memset(p, 0, TG3_REGDUMP_LEN); + memset(_p, 0, TG3_REG_BLK_SIZE); if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return; tg3_full_lock(tp, 0); -#define __GET_REG32(reg) (*(p)++ = tr32(reg)) -#define GET_REG32_LOOP(base, len) \ -do { p = (u32 *)(orig_p + (base)); \ - for (i = 0; i < len; i += 4) \ - __GET_REG32((base) + i); \ -} while (0) -#define GET_REG32_1(reg) \ -do { p = (u32 *)(orig_p + (reg)); \ - __GET_REG32((reg)); \ -} while (0) - - GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); - GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); - GET_REG32_LOOP(MAC_MODE, 0x4f0); - GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); - GET_REG32_1(SNDDATAC_MODE); - GET_REG32_LOOP(SNDBDS_MODE, 0x80); - GET_REG32_LOOP(SNDBDI_MODE, 0x48); - GET_REG32_1(SNDBDC_MODE); - GET_REG32_LOOP(RCVLPC_MODE, 0x20); - GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c); - GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); - GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); - GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); - GET_REG32_1(RCVDCC_MODE); - GET_REG32_LOOP(RCVBDI_MODE, 0x20); - GET_REG32_LOOP(RCVCC_MODE, 0x14); - GET_REG32_LOOP(RCVLSC_MODE, 0x08); - GET_REG32_1(MBFREE_MODE); - GET_REG32_LOOP(HOSTCC_MODE, 0x100); - GET_REG32_LOOP(MEMARB_MODE, 0x10); - GET_REG32_LOOP(BUFMGR_MODE, 0x58); - GET_REG32_LOOP(RDMAC_MODE, 0x08); - GET_REG32_LOOP(WDMAC_MODE, 0x08); - GET_REG32_1(RX_CPU_MODE); - GET_REG32_1(RX_CPU_STATE); - GET_REG32_1(RX_CPU_PGMCTR); - GET_REG32_1(RX_CPU_HWBKPT); - GET_REG32_1(TX_CPU_MODE); - GET_REG32_1(TX_CPU_STATE); - GET_REG32_1(TX_CPU_PGMCTR); - GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); - GET_REG32_LOOP(FTQ_RESET, 0x120); - GET_REG32_LOOP(MSGINT_MODE, 0x0c); - GET_REG32_1(DMAC_MODE); - GET_REG32_LOOP(GRC_MODE, 0x4c); - if (tp->tg3_flags & TG3_FLAG_NVRAM) - GET_REG32_LOOP(NVRAM_CMD, 0x24); - -#undef __GET_REG32 -#undef GET_REG32_LOOP -#undef GET_REG32_1 + tg3_dump_legacy_regs(tp, (u32 *)_p); tg3_full_unlock(tp); } @@ -9668,7 +9826,7 @@ u32 i, offset, len, b_offset, b_count; __be32 val; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) + if (tg3_flag(tp, NO_NVRAM)) return -EINVAL; if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) @@ -9697,7 +9855,7 @@ eeprom->len += b_count; } - /* read bytes upto the last 4 byte boundary */ + /* read bytes up to the last 4 byte boundary */ pd = &data[eeprom->len]; for (i = 0; i < (len - (len & 3)); i += 4) { ret = tg3_nvram_read_be32(tp, offset + i, &val); @@ -9736,7 
+9894,7 @@ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; - if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || + if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) return -EINVAL; @@ -9788,7 +9946,7 @@ { struct tg3 *tp = netdev_priv(dev); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; @@ -9816,8 +9974,11 @@ cmd->advertising = tp->link_config.advertising; if (netif_running(dev)) { - cmd->speed = tp->link_config.active_speed; + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); cmd->duplex = tp->link_config.active_duplex; + } else { + ethtool_cmd_speed_set(cmd, SPEED_INVALID); + cmd->duplex = DUPLEX_INVALID; } cmd->phy_address = tp->phy_addr; cmd->transceiver = XCVR_INTERNAL; @@ -9830,8 +9991,9 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; @@ -9879,14 +10041,14 @@ cmd->advertising &= mask; } else { if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { - if (cmd->speed != SPEED_1000) + if (speed != SPEED_1000) return -EINVAL; if (cmd->duplex != DUPLEX_FULL) return -EINVAL; } else { - if (cmd->speed != SPEED_100 && - cmd->speed != SPEED_10) + if (speed != SPEED_100 && + speed != SPEED_10) return -EINVAL; } } @@ -9901,7 +10063,7 @@ tp->link_config.duplex = DUPLEX_INVALID; } else { tp->link_config.advertising = 0; - tp->link_config.speed = cmd->speed; + tp->link_config.speed = speed; tp->link_config.duplex = cmd->duplex; } @@ -9931,14 +10093,12 @@ { struct tg3 *tp = netdev_priv(dev); - if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && - device_can_wakeup(&tp->pdev->dev)) + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) wol->supported = WAKE_MAGIC; else wol->supported = 0; wol->wolopts = 0; - if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && - device_can_wakeup(&tp->pdev->dev)) + if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) wol->wolopts = WAKE_MAGIC; memset(&wol->sopass, 0, sizeof(wol->sopass)); } @@ -9951,17 +10111,16 @@ if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if ((wol->wolopts & WAKE_MAGIC) && - !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp))) + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) return -EINVAL; + device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); + spin_lock_bh(&tp->lock); - if (wol->wolopts & WAKE_MAGIC) { - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; - device_set_wakeup_enable(dp, true); - } else { - tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; - device_set_wakeup_enable(dp, false); - } + if (device_may_wakeup(dp)) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); spin_unlock_bh(&tp->lock); return 0; @@ -9983,17 +10142,17 @@ { struct tg3 *tp = netdev_priv(dev); - if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + if (!tg3_flag(tp, TSO_CAPABLE)) { if (value) return -EINVAL; return 0; } if ((dev->features & NETIF_F_IPV6_CSUM) && - ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || - (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { + ((tg3_flag(tp, HW_TSO_2)) || + (tg3_flag(tp, HW_TSO_3)))) { if (value) { dev->features |= NETIF_F_TSO6; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + if ((tg3_flag(tp, HW_TSO_3)) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 
&& GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || @@ -10017,7 +10176,7 @@ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) return -EINVAL; - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); @@ -10046,7 +10205,7 @@ ering->rx_max_pending = tp->rx_std_ring_mask; ering->rx_mini_max_pending = 0; - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) + if (tg3_flag(tp, JUMBO_RING_ENABLE)) ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; else ering->rx_jumbo_max_pending = 0; @@ -10055,7 +10214,7 @@ ering->rx_pending = tp->rx_pending; ering->rx_mini_pending = 0; - if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) + if (tg3_flag(tp, JUMBO_RING_ENABLE)) ering->rx_jumbo_pending = tp->rx_jumbo_pending; else ering->rx_jumbo_pending = 0; @@ -10072,7 +10231,7 @@ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || (ering->tx_pending > TG3_TX_RING_SIZE - 1) || (ering->tx_pending <= MAX_SKB_FRAGS) || - ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && + (tg3_flag(tp, TSO_BUG) && (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) return -EINVAL; @@ -10086,12 +10245,12 @@ tp->rx_pending = ering->rx_pending; - if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && + if (tg3_flag(tp, MAX_RXPEND_64) && tp->rx_pending > 63) tp->rx_pending = 63; tp->rx_jumbo_pending = ering->rx_jumbo_pending; - for (i = 0; i < TG3_IRQ_MAX_VECS; i++) + for (i = 0; i < tp->irq_max; i++) tp->napi[i].tx_pending = ering->tx_pending; if (netif_running(dev)) { @@ -10113,7 +10272,7 @@ { struct tg3 *tp = netdev_priv(dev); - epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) epause->rx_pause = 1; @@ -10131,7 +10290,7 @@ struct tg3 *tp = netdev_priv(dev); int err = 0; - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { u32 newadv; struct phy_device *phydev; @@ -10139,8 +10298,7 @@ if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && - ((epause->rx_pause && !epause->tx_pause) || - (!epause->rx_pause && epause->tx_pause)))) + (epause->rx_pause != epause->tx_pause))) return -EINVAL; tp->link_config.flowctrl = 0; @@ -10160,9 +10318,9 @@ newadv = 0; if (epause->autoneg) - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_set(tp, PAUSE_AUTONEG); else - tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_clear(tp, PAUSE_AUTONEG); if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { u32 oldadv = phydev->advertising & @@ -10204,9 +10362,9 @@ tg3_full_lock(tp, irq_sync); if (epause->autoneg) - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_set(tp, PAUSE_AUTONEG); else - tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; + tg3_flag_clear(tp, PAUSE_AUTONEG); if (epause->rx_pause) tp->link_config.flowctrl |= FLOW_CTRL_RX; else @@ -10232,14 +10390,14 @@ static u32 tg3_get_rx_csum(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; + return (tg3_flag(tp, RX_CHECKSUMS)) != 0; } static int tg3_set_rx_csum(struct net_device *dev, u32 data) { struct tg3 *tp = netdev_priv(dev); - if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { + if (tg3_flag(tp, BROKEN_CHECKSUMS)) { if (data != 0) return -EINVAL; return 0; @@ -10247,9 +10405,9 @@ spin_lock_bh(&tp->lock); if (data) - tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; + tg3_flag_set(tp, RX_CHECKSUMS); else - tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; + 
tg3_flag_clear(tp, RX_CHECKSUMS); spin_unlock_bh(&tp->lock); return 0; @@ -10259,13 +10417,13 @@ { struct tg3 *tp = netdev_priv(dev); - if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { + if (tg3_flag(tp, BROKEN_CHECKSUMS)) { if (data != 0) return -EINVAL; return 0; } - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + if (tg3_flag(tp, 5755_PLUS)) ethtool_op_set_tx_ipv6_csum(dev, data); else ethtool_op_set_tx_csum(dev, data); @@ -10339,10 +10497,87 @@ memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); } +static __be32 * tg3_vpd_readblock(struct tg3 *tp) +{ + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + return buf; + +error: + kfree(buf); + return NULL; +} + #define NVRAM_TEST_SIZE 0x100 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c +#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 +#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 +#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x4c #define NVRAM_SELFBOOT_HW_SIZE 0x20 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c @@ -10352,7 +10587,7 @@ __be32 *buf; int i, j, k, err = 0, size; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) + if (tg3_flag(tp, NO_NVRAM)) return 0; if (tg3_nvram_read(tp, 0, &magic) != 0) @@ -10373,8 +10608,17 @@ case TG3_EEPROM_SB_REVISION_3: size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; default: - return 0; + return -EIO; } } else return 0; @@ -10466,16 +10710,50 @@ goto out; } + err = -EIO; + /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); - if (csum != be32_to_cpu(buf[0x10/4])) + if (csum != le32_to_cpu(buf[0x10/4])) goto out; /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); - if (csum != be32_to_cpu(buf[0xfc/4])) + if (csum != le32_to_cpu(buf[0xfc/4])) goto out; + kfree(buf); + + buf = tg3_vpd_readblock(tp); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN, + 
PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + err = 0; out: @@ -10660,9 +10938,9 @@ }; is_5705 = is_5750 = 0; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { is_5705 = 1; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) + if (tg3_flag(tp, 5750_PLUS)) is_5750 = 1; } @@ -10673,7 +10951,7 @@ if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) continue; - if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && + if (tg3_flag(tp, IS_5788) && (reg_tbl[i].flags & TG3_FL_NOT_5788)) continue; @@ -10796,16 +11074,15 @@ int err = 0; int i; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_flag(tp, 5717_PLUS)) mem_tbl = mem_tbl_5717; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) mem_tbl = mem_tbl_57765; - else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) + else if (tg3_flag(tp, 5755_PLUS)) mem_tbl = mem_tbl_5755; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) mem_tbl = mem_tbl_5906; - else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + else if (tg3_flag(tp, 5705_PLUS)) mem_tbl = mem_tbl_5705; else mem_tbl = mem_tbl_570x; @@ -10832,13 +11109,14 @@ int num_pkts, tx_len, rx_len, i, err; struct tg3_rx_buffer_desc *desc; struct tg3_napi *tnapi, *rnapi; - struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; tnapi = &tp->napi[0]; rnapi = &tp->napi[0]; if (tp->irq_cnt > 1) { - rnapi = &tp->napi[1]; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) tnapi = &tp->napi[1]; } coal_now = tnapi->coal_now | rnapi->coal_now; @@ -10846,14 +11124,17 @@ if (loopback_mode == TG3_MAC_LOOPBACK) { /* HW errata - mac loopback fails in some cases on 5780. * Normal traffic and PHY loopback are not affected by - * errata. + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + tg3_flag(tp, CPMU_PRESENT)) return 0; - mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | - MAC_MODE_PORT_INT_LPBACK; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + mac_mode |= MAC_MODE_PORT_INT_LPBACK; + if (!tg3_flag(tp, 5705_PLUS)) mac_mode |= MAC_MODE_LINK_POLARITY; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) mac_mode |= MAC_MODE_PORT_MODE_MII; @@ -10874,7 +11155,8 @@ tg3_writephy(tp, MII_BMCR, val); udelay(40); - mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_writephy(tp, MII_TG3_FET_PTEST, MII_TG3_FET_PTEST_FRC_TX_LINK | @@ -10902,6 +11184,13 @@ MII_TG3_EXT_CTRL_LNK3_LED_MODE); } tw32(MAC_MODE, mac_mode); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } } else { return -EINVAL; } @@ -11000,28 +11289,44 @@ return err; } -#define TG3_MAC_LOOPBACK_FAILED 1 -#define TG3_PHY_LOOPBACK_FAILED 2 -#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ - TG3_PHY_LOOPBACK_FAILED) +#define TG3_STD_LOOPBACK_FAILED 1 +#define TG3_JMB_LOOPBACK_FAILED 2 + +#define TG3_MAC_LOOPBACK_SHIFT 0 +#define TG3_PHY_LOOPBACK_SHIFT 4 +#define TG3_LOOPBACK_FAILED 0x00000033 static int tg3_test_loopback(struct tg3 *tp) { int err = 0; - u32 cpmuctrl = 0; + u32 eee_cap, cpmuctrl = 0; if (!netif_running(tp->dev)) return TG3_LOOPBACK_FAILED; + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + err = tg3_reset_hw(tp, 1); - if (err) - return TG3_LOOPBACK_FAILED; + if (err) { + err = TG3_LOOPBACK_FAILED; + goto done; + } + + if (tg3_flag(tp, ENABLE_RSS)) { + int i; + + /* Reroute all rx packets to the 1st queue */ + for (i = MAC_RSS_INDIR_TBL_0; + i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) + tw32(i, 0x0); + } /* Turn off gphy autopowerdown. */ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, false); - if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { + if (tg3_flag(tp, CPMU_PRESENT)) { int i; u32 status; @@ -11035,8 +11340,10 @@ udelay(10); } - if (status != CPMU_MUTEX_GNT_DRIVER) - return TG3_LOOPBACK_FAILED; + if (status != CPMU_MUTEX_GNT_DRIVER) { + err = TG3_LOOPBACK_FAILED; + goto done; + } /* Turn off link-based power management. */ cpmuctrl = tr32(TG3_CPMU_CTRL); @@ -11046,9 +11353,9 @@ } if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) - err |= TG3_MAC_LOOPBACK_FAILED; + err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; - if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { + if (tg3_flag(tp, CPMU_PRESENT)) { tw32(TG3_CPMU_CTRL, cpmuctrl); /* Release the mutex */ @@ -11056,15 +11363,19 @@ } if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { + !tg3_flag(tp, USE_PHYLIB)) { if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) - err |= TG3_PHY_LOOPBACK_FAILED; + err |= TG3_STD_LOOPBACK_FAILED << + TG3_PHY_LOOPBACK_SHIFT; } /* Re-enable gphy autopowerdown. 
*/ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, true); +done: + tp->phy_flags |= eee_cap; + return err; } @@ -11074,7 +11385,7 @@ struct tg3 *tp = netdev_priv(dev); if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - tg3_set_power_state(tp, PCI_D0); + tg3_power_up(tp); memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@ -11100,7 +11411,7 @@ tg3_halt(tp, RESET_KIND_SUSPEND, 1); err = tg3_nvram_lock(tp); tg3_halt_cpu(tp, RX_CPU_BASE); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tg3_halt_cpu(tp, TX_CPU_BASE); if (!err) tg3_nvram_unlock(tp); @@ -11130,7 +11441,7 @@ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); if (netif_running(dev)) { - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); err2 = tg3_restart_hw(tp, 1); if (!err2) tg3_netif_start(tp); @@ -11142,7 +11453,7 @@ tg3_phy_start(tp); } if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - tg3_set_power_state(tp, PCI_D3hot); + tg3_power_down(tp); } @@ -11152,7 +11463,7 @@ struct tg3 *tp = netdev_priv(dev); int err; - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; @@ -11171,7 +11482,7 @@ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if (!netif_running(dev)) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11187,7 +11498,7 @@ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if (!netif_running(dev)) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11227,7 +11538,6 @@ tg3_full_unlock(tp); } #endif - static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct tg3 *tp = netdev_priv(dev); @@ -11242,7 +11552,7 @@ u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + if (!tg3_flag(tp, 5705_PLUS)) { max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; max_stat_coal_ticks = MAX_STAT_COAL_TICKS; @@ -11361,8 +11671,7 @@ { u32 val; - if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || - tg3_nvram_read(tp, 0, &val) != 0) + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) return; /* Selfboot format */ @@ -11397,19 +11706,19 @@ nvcfg1 = tr32(NVRAM_CFG1); if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, FLASH); } else { nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; tw32(NVRAM_CFG1, nvcfg1); } - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; @@ -11418,12 +11727,12 @@ case FLASH_VENDOR_ATMEL_EEPROM: tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_VENDOR_ST: tp->nvram_jedecnum = JEDEC_ST; tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_VENDOR_SAIFUN: 
tp->nvram_jedecnum = JEDEC_SAIFUN; @@ -11438,7 +11747,7 @@ } else { tp->nvram_jedecnum = JEDEC_ATMEL; tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); } } @@ -11477,29 +11786,29 @@ /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tg3_flag_set(tp, PROTECTED_NVRAM); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); break; case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); break; case FLASH_5752VENDOR_ST_M45PE10: case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); break; } - if (tp->tg3_flags2 & TG3_FLG2_FLASH) { + if (tg3_flag(tp, FLASH)) { tg3_nvram_get_pagesize(tp, nvcfg1); } else { /* For eeprom, set pagesize to maximum eeprom size */ @@ -11518,7 +11827,7 @@ /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tg3_flag_set(tp, PROTECTED_NVRAM); protect = 1; } @@ -11529,8 +11838,8 @@ case FLASH_5755VENDOR_ATMEL_FLASH_3: case FLASH_5755VENDOR_ATMEL_FLASH_5: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 264; if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) @@ -11547,8 +11856,8 @@ case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 256; if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) tp->nvram_size = (protect ? 
@@ -11578,7 +11887,7 @@ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; @@ -11589,16 +11898,16 @@ case FLASH_5755VENDOR_ATMEL_FLASH_2: case FLASH_5755VENDOR_ATMEL_FLASH_3: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 264; break; case FLASH_5752VENDOR_ST_M45PE10: case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 256; break; } @@ -11612,7 +11921,7 @@ /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tg3_flag_set(tp, PROTECTED_NVRAM); protect = 1; } @@ -11627,9 +11936,9 @@ case FLASH_5761VENDOR_ATMEL_MDB081D: case FLASH_5761VENDOR_ATMEL_MDB161D: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); tp->nvram_pagesize = 256; break; case FLASH_5761VENDOR_ST_A_M45PE20: @@ -11641,8 +11950,8 @@ case FLASH_5761VENDOR_ST_M_M45PE80: case FLASH_5761VENDOR_ST_M_M45PE16: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); tp->nvram_pagesize = 256; break; } @@ -11682,7 +11991,7 @@ static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) { tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; } @@ -11696,7 +12005,7 @@ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; @@ -11710,8 +12019,8 @@ case FLASH_57780VENDOR_ATMEL_AT45DB041D: case FLASH_57780VENDOR_ATMEL_AT45DB041B: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: @@ -11733,8 +12042,8 @@ case FLASH_5752VENDOR_ST_M45PE20: case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ST_M45PE10: @@ -11749,13 +12058,13 @@ } break; default: - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; + tg3_flag_set(tp, NO_NVRAM); return; } tg3_nvram_get_pagesize(tp, nvcfg1); if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } @@ -11769,7 +12078,7 @@ case FLASH_5717VENDOR_ATMEL_EEPROM: case FLASH_5717VENDOR_MICRO_EEPROM: tp->nvram_jedecnum 
= JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tg3_flag_set(tp, NVRAM_BUFFERED); tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; @@ -11783,11 +12092,13 @@ case FLASH_5717VENDOR_ATMEL_ADB021D: case FLASH_5717VENDOR_ATMEL_45USPT: tp->nvram_jedecnum = JEDEC_ATMEL; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; case FLASH_5717VENDOR_ATMEL_ADB021B: case FLASH_5717VENDOR_ATMEL_ADB021D: tp->nvram_size = TG3_NVRAM_SIZE_256KB; @@ -11808,13 +12119,15 @@ case FLASH_5717VENDOR_ST_25USPT: case FLASH_5717VENDOR_ST_45USPT: tp->nvram_jedecnum = JEDEC_ST; - tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; - tp->tg3_flags2 |= TG3_FLG2_FLASH; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5717VENDOR_ST_M_M25PE20: - case FLASH_5717VENDOR_ST_A_M25PE20: case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: case FLASH_5717VENDOR_ST_A_M45PE20: tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; @@ -11824,13 +12137,125 @@ } break; default: - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; + tg3_flag_set(tp, NO_NVRAM); return; } tg3_nvram_get_pagesize(tp, nvcfg1); if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + 
case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } /* Chips other than 5700/5701 use the NVRAM for fetching info. */ @@ -11850,7 +12275,7 @@ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - tp->tg3_flags |= TG3_FLAG_NVRAM; + tg3_flag_set(tp, NVRAM); if (tg3_nvram_lock(tp)) { netdev_warn(tp->dev, @@ -11880,6 +12305,8 @@ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); else tg3_get_nvram_info(tp); @@ -11890,7 +12317,8 @@ tg3_nvram_unlock(tp); } else { - tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); tg3_get_eeprom_size(tp); } @@ -12073,7 +12501,7 @@ nvram_cmd |= NVRAM_CMD_LAST; if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && - !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && + !tg3_flag(tp, 5755_PLUS) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -12083,7 +12511,7 @@ break; } - if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { + if (!tg3_flag(tp, FLASH)) { /* We always do complete word writes to eeprom. 
*/ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); } @@ -12099,13 +12527,13 @@ { int ret; - if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & ~GRC_LCLCTRL_GPIO_OUTPUT1); udelay(40); } - if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { + if (!tg3_flag(tp, NVRAM)) { ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); } else { u32 grc_mode; @@ -12115,16 +12543,13 @@ return ret; tg3_enable_nvram_access(tp); - if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) tw32(NVRAM_WRITE1, 0x406); grc_mode = tr32(GRC_MODE); tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); - if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || - !(tp->tg3_flags2 & TG3_FLG2_FLASH)) { - + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { ret = tg3_nvram_write_block_buffered(tp, offset, len, buf); } else { @@ -12139,7 +12564,7 @@ tg3_nvram_unlock(tp); } - if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(40); } @@ -12261,19 +12686,22 @@ tp->led_ctrl = LED_CTRL_MODE_PHY_1; /* Assume an onboard device and WOL capable by default. */ - tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { - tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; - tp->tg3_flags2 |= TG3_FLG2_IS_NIC; + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); } val = tr32(VCPU_CFGSHDW); if (val & VCPU_CFGSHDW_ASPM_DBNC) - tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; + tg3_flag_set(tp, ASPM_WORKAROUND); if ((val & VCPU_CFGSHDW_WOL_ENABLE) && - (val & VCPU_CFGSHDW_WOL_MAGPKT)) - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } goto done; } @@ -12288,9 +12716,9 @@ tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); ver >>= NIC_SRAM_DATA_VER_SHIFT; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && (ver > 0) && (ver < 0x100)) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); @@ -12314,13 +12742,13 @@ tp->phy_id = eeprom_phy_id; if (eeprom_phy_serdes) { - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; else tp->phy_flags |= TG3_PHYFLG_MII_SERDES; } - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) + if (tg3_flag(tp, 5750_PLUS)) led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | SHASTA_EXT_LED_MODE_MASK); else @@ -12380,34 +12808,36 @@ tp->led_ctrl = LED_CTRL_MODE_PHY_1; if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { - tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; + tg3_flag_set(tp, EEPROM_WRITE_PROT); if ((tp->pdev->subsystem_vendor == PCI_VENDOR_ID_ARIMA) && (tp->pdev->subsystem_device == 0x205a || tp->pdev->subsystem_device == 0x2063)) - tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; + tg3_flag_clear(tp, EEPROM_WRITE_PROT); } else { - tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; - tp->tg3_flags2 |= TG3_FLG2_IS_NIC; + 
tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); } if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { - tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) - tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; + tg3_flag_set(tp, ENABLE_ASF); + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); } if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && - (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) - tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ENABLE_APE); if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) - tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; + tg3_flag_clear(tp, WOL_CAP); - if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && - (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) - tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; + if (tg3_flag(tp, WOL_CAP) && + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } if (cfg2 & (1 << 17)) tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; @@ -12417,33 +12847,35 @@ if (cfg2 & (1 << 18)) tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; - if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) || - ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) && + if ((tg3_flag(tp, 57765_PLUS) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && + if (tg3_flag(tp, PCI_EXPRESS) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { + !tg3_flag(tp, 57765_PLUS)) { u32 cfg3; tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) - tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; + tg3_flag_set(tp, ASPM_WORKAROUND); } if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) - tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE; + tg3_flag_set(tp, RGMII_INBAND_DISABLE); if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) - tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; + tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) - tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; + tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); } done: - device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); - device_set_wakeup_enable(&tp->pdev->dev, - tp->tg3_flags & TG3_FLAG_WOL_ENABLE); + if (tg3_flag(tp, WOL_CAP)) + device_set_wakeup_enable(&tp->pdev->dev, + tg3_flag(tp, WOL_ENABLE)); + else + device_set_wakeup_capable(&tp->pdev->dev, false); } static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) @@ -12495,21 +12927,53 @@ return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); } +static void __devinit tg3_phy_init_link_config(struct tg3 *tp) +{ + u32 adv = ADVERTISED_Autoneg | + ADVERTISED_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + 
tp->link_config.orig_speed = SPEED_INVALID; + tp->link_config.orig_duplex = DUPLEX_INVALID; + tp->link_config.orig_autoneg = AUTONEG_INVALID; +} + static int __devinit tg3_phy_probe(struct tg3 *tp) { u32 hw_phy_id_1, hw_phy_id_2; u32 hw_phy_id, hw_phy_id_masked; int err; - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) + /* flow control autonegotiation is default behavior */ + tg3_flag_set(tp, PAUSE_AUTONEG); + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + + if (tg3_flag(tp, USE_PHYLIB)) return tg3_phy_init(tp); /* Reading the PHY ID register can conflict with ASF * firmware access to the PHY hardware. */ err = 0; - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; } else { /* Now read the physical PHY_ID from the chip and verify @@ -12562,10 +13026,12 @@ tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + tg3_phy_init_link_config(tp); + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { - u32 bmsr, adv_reg, tg3_ctrl, mask; + !tg3_flag(tp, ENABLE_APE) && + !tg3_flag(tp, ENABLE_ASF)) { + u32 bmsr, mask; tg3_readphy(tp, MII_BMSR, &bmsr); if (!tg3_readphy(tp, MII_BMSR, &bmsr) && @@ -12576,36 +13042,18 @@ if (err) return err; - adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL | - ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); - tg3_ctrl = 0; - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | - MII_TG3_CTRL_ADV_1000_FULL); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | - MII_TG3_CTRL_ENABLE_AS_MASTER); - } + tg3_phy_set_wirespeed(tp); mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); if (!tg3_copper_is_advertising_all(tp, mask)) { - tg3_writephy(tp, MII_ADVERTISE, adv_reg); - - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) - tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); } - tg3_phy_set_wirespeed(tp); - - tg3_writephy(tp, MII_ADVERTISE, adv_reg); - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) - tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); } skip_phy_reset: @@ -12617,60 +13065,18 @@ err = tg3_init_5401phy_dsp(tp); } - if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) - tp->link_config.advertising = - (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | - ADVERTISED_FIBRE); - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - tp->link_config.advertising &= - ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - return err; } static void __devinit tg3_read_vpd(struct tg3 *tp) { - u8 vpd_data[TG3_NVM_VPD_LEN]; + u8 *vpd_data; unsigned int block_end, rosize, len; int j, i = 0; - u32 magic; - - if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || - tg3_nvram_read(tp, 0x0, &magic)) - goto out_not_found; - - if (magic == TG3_EEPROM_MAGIC) { - for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { - u32 tmp; - - /* The data is in little-endian format in NVRAM. - * Use the big-endian read routines to preserve - * the byte order as it exists in NVRAM. 
- */ - if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) - goto out_not_found; - - memcpy(&vpd_data[i], &tmp, sizeof(tmp)); - } - } else { - ssize_t cnt; - unsigned int pos = 0; - for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { - cnt = pci_read_vpd(tp->pdev, pos, - TG3_NVM_VPD_LEN - pos, - &vpd_data[pos]); - if (cnt == -ETIMEDOUT || -EINTR) - cnt = 0; - else if (cnt < 0) - goto out_not_found; - } - if (pos != TG3_NVM_VPD_LEN) - goto out_not_found; - } + vpd_data = (u8 *)tg3_vpd_readblock(tp); + if (!vpd_data) + goto out_no_vpd; i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, PCI_VPD_LRDT_RO_DATA); @@ -12724,43 +13130,51 @@ memcpy(tp->board_part_number, &vpd_data[i], len); - return; - out_not_found: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + +out_no_vpd: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { strcpy(tp->board_part_number, "BCM95906"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) - strcpy(tp->board_part_number, "BCM57780"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) - strcpy(tp->board_part_number, "BCM57760"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) - strcpy(tp->board_part_number, "BCM57790"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) - strcpy(tp->board_part_number, "BCM57788"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) - strcpy(tp->board_part_number, "BCM57761"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) - strcpy(tp->board_part_number, "BCM57765"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) - 
strcpy(tp->board_part_number, "BCM57781"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) - strcpy(tp->board_part_number, "BCM57785"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) - strcpy(tp->board_part_number, "BCM57791"); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) - strcpy(tp->board_part_number, "BCM57795"); - else + } else { +nomatch: strcpy(tp->board_part_number, "none"); + } } static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) @@ -12869,6 +13283,9 @@ case TG3_EEPROM_SB_REVISION_5: offset = TG3_EEPROM_SB_F1R5_EDH_OFF; break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; default: return; } @@ -12914,7 +13331,7 @@ if (offset == TG3_NVM_DIR_END) return; - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + if (!tg3_flag(tp, 5705_PLUS)) start = 0x08000000; else if (tg3_nvram_read(tp, offset - 4, &start)) return; @@ -12954,8 +13371,7 @@ u32 apedata; char *fwtype; - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) return; apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); @@ -12968,10 +13384,12 @@ apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); - if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { + tg3_flag_set(tp, APE_HAS_NCSI); fwtype = "NCSI"; - else + } else { fwtype = "DASH"; + } vlen = strlen(tp->fw_ver); @@ -12991,7 +13409,7 @@ if (tp->fw_ver[0] != 0) vpd_vers = true; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { + if (tg3_flag(tp, NO_NVRAM)) { strcat(tp->fw_ver, "sb"); return; } @@ -13008,8 +13426,7 @@ else return; - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers) + if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers) goto done; tg3_read_mgmtfw_ver(tp); @@ -13020,7 +13437,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); -static void inline vlan_features_add(struct net_device *dev, unsigned long flags) +static inline void vlan_features_add(struct net_device *dev, unsigned long flags) { #if TG3_VLAN_TAG_USED dev->vlan_features |= flags; @@ -13029,27 +13446,23 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - return 4096; - else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) - return 1024; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + return TG3_RX_RET_MAX_SIZE_5717; + else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) + return TG3_RX_RET_MAX_SIZE_5700; else - return 512; + return TG3_RX_RET_MAX_SIZE_5705; } +static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, + { }, +}; + static int __devinit tg3_get_invariants(struct tg3 *tp) { - static struct pci_device_id write_reorder_chipsets[] = { - { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_FE_GATE_700C) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_8131_BRIDGE) }, - { PCI_DEVICE(PCI_VENDOR_ID_VIA, - 
PCI_DEVICE_ID_VIA_8385_0) }, - { }, - }; u32 misc_ctrl_reg; u32 pci_state_reg, grc_misc_cfg; u32 val; @@ -13083,8 +13496,8 @@ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) pci_read_config_dword(tp->pdev, TG3PCI_GEN2_PRODID_ASICREV, &prod_id_asic_rev); @@ -13161,15 +13574,14 @@ if (bridge->subordinate && (bridge->subordinate->number == tp->pdev->bus->number)) { - - tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; + tg3_flag_set(tp, ICH_WORKAROUND); pci_dev_put(bridge); break; } } } - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { static struct tg3_dev_id { u32 vendor; u32 device; @@ -13194,7 +13606,7 @@ tp->pdev->bus->number) && (bridge->subordinate->subordinate >= tp->pdev->bus->number)) { - tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG; + tg3_flag_set(tp, 5701_DMA_BUG); pci_dev_put(bridge); break; } @@ -13209,8 +13621,8 @@ */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; - tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; + tg3_flag_set(tp, 5780_CLASS); + tg3_flag_set(tp, 40BIT_DMA_BUG); tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); } else { struct pci_dev *bridge = NULL; @@ -13224,7 +13636,7 @@ tp->pdev->bus->number) && (bridge->subordinate->subordinate >= tp->pdev->bus->number)) { - tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; + tg3_flag_set(tp, 40BIT_DMA_BUG); pci_dev_put(bridge); break; } @@ -13239,13 +13651,18 @@ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) tp->pdev_peer = tg3_find_peer(tp); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tp->tg3_flags3 |= TG3_FLG3_5717_PLUS; + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); /* Intentionally exclude ASIC_REV_5906 */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || @@ -13254,94 +13671,103 @@ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) - tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || - (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) - tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; - - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || - (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) - tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; - - /* 5700 B0 chips do not support checksumming correctly due - * to hardware bugs. 
- */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) - tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; - else { - unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO; + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); - tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; - if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) - features |= NETIF_F_IPV6_CSUM; - tp->dev->features |= features; - vlan_features_add(tp->dev, features); - } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); /* Determine TSO capabilities */ - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; - else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + ; /* Do nothing. HW bug. */ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; - else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) - tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; + tg3_flag_clear(tp, TSO_BUG); } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { - tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; + tg3_flag_set(tp, TSO_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) tp->fw_needed = FIRMWARE_TG3TSO5; else tp->fw_needed = FIRMWARE_TG3TSO; } + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tg3_flag_set(tp, TSO_CAPABLE); + else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + tp->irq_max = 1; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { - tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && tp->pdev_peer == tp->pdev)) - tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; + tg3_flag_clear(tp, SUPPORT_MSI); - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; + tg3_flag_set(tp, 1SHOT_MSI); } - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { - tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); tp->irq_max = TG3_IRQ_MAX_VECS; } } + /* All chips can get confused if TX buffers + * straddle the 4GB address boundary. 
+ */ + tg3_flag_set(tp, 4G_DMA_BNDRY_BUG); + + if (tg3_flag(tp, 5755_PLUS)) + tg3_flag_set(tp, SHORT_DMA_BUG); + else + tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; - else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { - tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; - tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; - } + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_flag_set(tp,LRG_PROD_RING_CAP); - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) - tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; + if (tg3_flag(tp, 57765_PLUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) - tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); @@ -13350,45 +13776,12 @@ if (tp->pcie_cap != 0) { u16 lnkctl; - tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; + tg3_flag_set(tp, PCI_EXPRESS); tp->pcie_readrq = 4096; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { - u16 word; - - pci_read_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKSTA, - &word); - switch (word & PCI_EXP_LNKSTA_CLS) { - case PCI_EXP_LNKSTA_CLS_2_5GB: - word &= PCI_EXP_LNKSTA_NLW; - word >>= PCI_EXP_LNKSTA_NLW_SHIFT; - switch (word) { - case 2: - tp->pcie_readrq = 2048; - break; - case 4: - tp->pcie_readrq = 1024; - break; - } - break; - - case PCI_EXP_LNKSTA_CLS_5_0GB: - word &= PCI_EXP_LNKSTA_NLW; - word >>= PCI_EXP_LNKSTA_NLW_SHIFT; - switch (word) { - case 1: - tp->pcie_readrq = 2048; - break; - case 2: - tp->pcie_readrq = 1024; - break; - case 4: - tp->pcie_readrq = 512; - break; - } - } - } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->pcie_readrq = 2048; pcie_set_readrq(tp->pdev, tp->pcie_readrq); @@ -13397,19 +13790,20 @@ &lnkctl); if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) - tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; + tg3_flag_set(tp, CLKREQ_BUG); } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { - tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN; + tg3_flag_set(tp, L1PLLPD_EN); } } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { - tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; - } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + tg3_flag_set(tp, PCI_EXPRESS); + } else if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS)) { tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); if (!tp->pcix_cap) { dev_err(&tp->pdev->dev, @@ -13418,7 +13812,7 @@ } if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) - tp->tg3_flags |= TG3_FLAG_PCIX_MODE; + tg3_flag_set(tp, PCIX_MODE); } /* If we have an AMD 762 or VIA K8T800 chipset, write @@ -13427,9 +13821,9 @@ * every mailbox register 
write to force the writes to be * posted to the chip in order. */ - if (pci_dev_present(write_reorder_chipsets) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) - tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; + if (pci_dev_present(tg3_write_reorder_chipsets) && + !tg3_flag(tp, PCI_EXPRESS)) + tg3_flag_set(tp, MBOX_WRITE_REORDER); pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &tp->pci_cacheline_sz); @@ -13446,17 +13840,17 @@ /* 5700 BX chips need to have their TX producer index * mailboxes written twice to workaround a bug. */ - tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; + tg3_flag_set(tp, TXD_MBOX_HWBUG); /* If we are in PCI-X mode, enable register write workaround. * * The workaround is to use indirect register accesses * for all chip writes not to mailbox registers. */ - if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + if (tg3_flag(tp, PCIX_MODE)) { u32 pm_reg; - tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; + tg3_flag_set(tp, PCIX_TARGET_HWBUG); /* The chip can have it's power management PCI config * space registers clobbered due to this bug. @@ -13479,9 +13873,9 @@ } if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) - tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; + tg3_flag_set(tp, PCI_HIGH_SPEED); if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) - tp->tg3_flags |= TG3_FLAG_PCI_32BIT; + tg3_flag_set(tp, PCI_32BIT); /* Chip-specific fixup from Broadcom driver */ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && @@ -13499,10 +13893,10 @@ tp->write32_rx_mbox = tg3_write32; /* Various workaround register access methods */ - if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) + if (tg3_flag(tp, PCIX_TARGET_HWBUG)) tp->write32 = tg3_write_indirect_reg32; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || - ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && + (tg3_flag(tp, PCI_EXPRESS) && tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { /* * Back to back register writes can cause problems on these @@ -13514,14 +13908,13 @@ tp->write32 = tg3_write_flush_reg32; } - if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || - (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { tp->write32_tx_mbox = tg3_write32_tx_mbox; - if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) + if (tg3_flag(tp, MBOX_WRITE_REORDER)) tp->write32_rx_mbox = tg3_write_flush_reg32; } - if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { + if (tg3_flag(tp, ICH_WORKAROUND)) { tp->read32 = tg3_read_indirect_reg32; tp->write32 = tg3_write_indirect_reg32; tp->read32_mbox = tg3_read_indirect_mbox; @@ -13544,13 +13937,13 @@ } if (tp->write32 == tg3_write_indirect_reg32 || - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && + (tg3_flag(tp, PCIX_MODE) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) - tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; + tg3_flag_set(tp, SRAM_USE_CONFIG); /* Get eeprom hw config before calling tg3_set_power_state(). - * In particular, the TG3_FLG2_IS_NIC flag must be + * In particular, the TG3_FLAG_IS_NIC flag must be * determined before calling tg3_set_power_state() so that * we know whether or not to switch out of Vaux power. * When the flag is set, it means that GPIO1 is used for eeprom @@ -13559,7 +13952,7 @@ */ tg3_get_eeprom_hw_cfg(tp); - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. 
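/* A minimal sketch of the file-scope PCI device-table pattern used above for
 * tg3_write_reorder_chipsets: DEFINE_PCI_DEVICE_TABLE() declares a const,
 * zero-terminated struct pci_device_id array, and pci_dev_present() reports
 * whether any listed bridge is installed in the system.  The example_* names
 * are illustrative only, not part of tg3.
 */
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_reorder_bridges) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },	/* terminating entry required by pci_dev_present() */
};

static int example_needs_mbox_write_reorder(void)
{
	/* Non-zero when one of the bridges listed above is present. */
	return pci_dev_present(example_reorder_bridges);
}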
*/ @@ -13574,16 +13967,16 @@ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) - tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, CPMU_PRESENT); - /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). + /* Set up tp->grc_local_ctrl before calling tg3_power_up(). * GPIO1 driven high will bring 5700's external PHY out of reset. * It is also used as eeprom write protect on LOMs. */ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || - (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1); /* Unused GPIO3 must be driven as output on 5752 because there @@ -13601,14 +13994,14 @@ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { /* Turn off the debug UART. */ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; - if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) + if (tg3_flag(tp, IS_NIC)) /* Keep VMain power. */ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OUTPUT0; } /* Force the chip into D0. */ - err = tg3_set_power_state(tp, PCI_D0); + err = tg3_power_up(tp); if (err) { dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); return err; @@ -13617,26 +14010,25 @@ /* Derive initial jumbo mode from MTU assigned in * ether_setup() via the alloc_etherdev() call */ - if (tp->dev->mtu > ETH_DATA_LEN && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) - tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); /* Determine WakeOnLan speed to use. 
*/ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { - tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); + tg3_flag_clear(tp, WOL_SPEED_100MB); } else { - tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; + tg3_flag_set(tp, WOL_SPEED_100MB); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->phy_flags |= TG3_PHYFLG_IS_FET; /* A few boards don't want Ethernet@WireSpeed phy feature */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || - ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || (tp->phy_flags & TG3_PHYFLG_IS_FET) || @@ -13649,11 +14041,11 @@ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; - if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + if (tg3_flag(tp, 5705_PLUS) && !(tp->phy_flags & TG3_PHYFLG_IS_FET) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && - !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { + !tg3_flag(tp, 57765_PLUS)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || @@ -13674,7 +14066,7 @@ tp->phy_otp = TG3_OTP_DEFAULT; } - if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) + if (tg3_flag(tp, CPMU_PRESENT)) tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; else tp->mi_mode = MAC_MI_MODE_BASE; @@ -13684,9 +14076,17 @@ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + /* Set these bits to enable statistics workaround. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; + tg3_flag_set(tp, USE_PHYLIB); err = tg3_mdio_init(tp); if (err) @@ -13694,7 +14094,15 @@ /* Initialize data/descriptor byte/word swapping. 
*/ val = tr32(GRC_MODE); - val &= GRC_MODE_HOST_STACKUP; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + tw32(GRC_MODE, val | tp->grc_mode); tg3_switch_clocks(tp); @@ -13705,7 +14113,7 @@ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && - (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); if (chiprevid == CHIPREV_ID_5701_A0 || @@ -13724,7 +14132,7 @@ writel(0x00000000, sram_base + 4); writel(0xffffffff, sram_base + 4); if (readl(sram_base) != 0x00000000) - tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; + tg3_flag_set(tp, PCIX_TARGET_HWBUG); } } @@ -13737,12 +14145,12 @@ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) - tp->tg3_flags2 |= TG3_FLG2_IS_5788; + tg3_flag_set(tp, IS_5788); - if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && - (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) - tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + if (!tg3_flag(tp, IS_5788) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | HOSTCC_MODE_CLRTICK_TXBD); @@ -13752,9 +14160,8 @@ } /* Preserve the APE MAC_MODE bits */ - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) - tp->mac_mode = tr32(MAC_MODE) | - MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; else tp->mac_mode = TG3_DEF_MAC_MODE; @@ -13800,9 +14207,9 @@ * status register in those cases. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) - tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; + tg3_flag_set(tp, USE_LINKCHG_REG); else - tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; + tg3_flag_clear(tp, USE_LINKCHG_REG); /* The led_ctrl is set during tg3_phy_probe, here we might * have to force the link status polling mechanism based @@ -13812,19 +14219,19 @@ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; - tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; + tg3_flag_set(tp, USE_LINKCHG_REG); } /* For all SERDES we poll the MAC status register. 
*/ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) - tp->tg3_flags |= TG3_FLAG_POLL_SERDES; + tg3_flag_set(tp, POLL_SERDES); else - tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; + tg3_flag_clear(tp, POLL_SERDES); tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { + tg3_flag(tp, PCIX_MODE)) { tp->rx_offset -= NET_IP_ALIGN; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS tp->rx_copy_thresh = ~(u16)0; @@ -13845,7 +14252,7 @@ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tp->rx_std_max_post = 8; - if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) + if (tg3_flag(tp, ASPM_WORKAROUND)) tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & PCIE_PWR_MGMT_L1_THRESH_MSK; @@ -13892,16 +14299,15 @@ #endif mac_offset = 0x7c; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) mac_offset = 0xcc; if (tg3_nvram_lock(tp)) tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); else tg3_nvram_unlock(tp); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + } else if (tg3_flag(tp, 5717_PLUS)) { if (PCI_FUNC(tp->pdev->devfn) & 1) mac_offset = 0xcc; if (PCI_FUNC(tp->pdev->devfn) > 1) @@ -13926,7 +14332,7 @@ } if (!addr_ok) { /* Next, try NVRAM. */ - if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) && + if (!tg3_flag(tp, NO_NVRAM) && !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); @@ -13977,7 +14383,7 @@ */ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + !tg3_flag(tp, PCI_EXPRESS)) goto out; #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) @@ -13990,7 +14396,7 @@ #endif #endif - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { + if (tg3_flag(tp, 57765_PLUS)) { val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; goto out; } @@ -14009,8 +14415,7 @@ * other than 5700 and 5701 which do not implement the * boundary bits. 
*/ - if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { switch (cacheline_size) { case 16: case 32: @@ -14035,7 +14440,7 @@ DMA_RWCTRL_WRITE_BNDRY_384_PCIX); break; } - } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + } else if (tg3_flag(tp, PCI_EXPRESS)) { switch (cacheline_size) { case 16: case 32: @@ -14184,13 +14589,19 @@ #define TEST_BUFFER_SIZE 0x2000 +static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, +}; + static int __devinit tg3_test_dma(struct tg3 *tp) { dma_addr_t buf_dma; u32 *buf, saved_dma_rwctrl; int ret = 0; - buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out_nofree; @@ -14201,13 +14612,13 @@ tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) + if (tg3_flag(tp, 57765_PLUS)) goto out; - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if (tg3_flag(tp, PCI_EXPRESS)) { /* DMA read watermark not used on PCIE */ tp->dma_rwctrl |= 0x00180000; - } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { + } else if (!tg3_flag(tp, PCIX_MODE)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) tp->dma_rwctrl |= 0x003f0000; @@ -14223,7 +14634,7 @@ * do the less restrictive ONE_DMA workaround for * better performance. */ - if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && + if (tg3_flag(tp, 40BIT_DMA_BUG) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) tp->dma_rwctrl |= 0x8000; else if (ccval == 0x6 || ccval == 0x7) @@ -14352,17 +14763,11 @@ } if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != DMA_RWCTRL_WRITE_BNDRY_16) { - static struct pci_device_id dma_wait_state_chipsets[] = { - { PCI_DEVICE(PCI_VENDOR_ID_APPLE, - PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, - { }, - }; - /* DMA test passed without adjusting DMA boundary, * now look for chipsets that are known to expose the * DMA bug without failing the test. 
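/* A short sketch of the coherent-DMA pairing that tg3_test_dma() switches to
 * above: dma_alloc_coherent() returns both a CPU pointer and the bus address
 * handed to the device, and dma_free_coherent() must release the identical
 * size/address pair.  The function name and the 0x2000 buffer size are
 * illustrative, assuming only a valid struct pci_dev.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

static int example_dma_roundtrip(struct pci_dev *pdev)
{
	dma_addr_t buf_dma;
	u32 *buf;

	buf = dma_alloc_coherent(&pdev->dev, 0x2000, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf[0] = 0x12345678;	/* CPU stores are visible to the device */
	/* ... program the device with buf_dma and run the transfer ... */

	dma_free_coherent(&pdev->dev, 0x2000, buf, buf_dma);
	return 0;
}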
*/ - if (pci_dev_present(dma_wait_state_chipsets)) { + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; } else { @@ -14374,31 +14779,14 @@ } out: - pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); out_nofree: return ret; } -static void __devinit tg3_init_link_config(struct tg3 *tp) -{ - tp->link_config.advertising = - (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_MII); - tp->link_config.speed = SPEED_INVALID; - tp->link_config.duplex = DUPLEX_INVALID; - tp->link_config.autoneg = AUTONEG_ENABLE; - tp->link_config.active_speed = SPEED_INVALID; - tp->link_config.active_duplex = DUPLEX_INVALID; - tp->link_config.orig_speed = SPEED_INVALID; - tp->link_config.orig_duplex = DUPLEX_INVALID; - tp->link_config.orig_autoneg = AUTONEG_INVALID; -} - static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) { - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { + if (tg3_flag(tp, 57765_PLUS)) { tp->bufmgr_config.mbuf_read_dma_low_water = DEFAULT_MB_RDMA_LOW_WATER_5705; tp->bufmgr_config.mbuf_mac_rx_low_water = @@ -14412,7 +14800,7 @@ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; tp->bufmgr_config.mbuf_high_water_jumbo = DEFAULT_MB_HIGH_WATER_JUMBO_57765; - } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + } else if (tg3_flag(tp, 5705_PLUS)) { tp->bufmgr_config.mbuf_read_dma_low_water = DEFAULT_MB_RDMA_LOW_WATER_5705; tp->bufmgr_config.mbuf_mac_rx_low_water = @@ -14476,6 +14864,7 @@ case TG3_PHY_ID_BCM5718S: return "5718S"; case TG3_PHY_ID_BCM57765: return "57765"; case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; case TG3_PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -14484,10 +14873,10 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) { - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if (tg3_flag(tp, PCI_EXPRESS)) { strcpy(str, "PCI Express"); return str; - } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + } else if (tg3_flag(tp, PCIX_MODE)) { u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; strcpy(str, "PCIX:"); @@ -14506,12 +14895,12 @@ strcat(str, "100MHz"); } else { strcpy(str, "PCI:"); - if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) + if (tg3_flag(tp, PCI_HIGH_SPEED)) strcat(str, "66MHz"); else strcat(str, "33MHz"); } - if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) + if (tg3_flag(tp, PCI_32BIT)) strcat(str, ":32-bit"); else strcat(str, ":64-bit"); @@ -14570,7 +14959,7 @@ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; } - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (tg3_flag(tp, 5705_PLUS)) { ec->rx_coalesce_usecs_irq = 0; ec->tx_coalesce_usecs_irq = 0; ec->stats_block_coalesce_usecs = 0; @@ -14596,25 +14985,6 @@ #endif }; -static const struct net_device_ops tg3_netdev_ops_dma_bug = { - .ndo_open = tg3_open, - .ndo_stop = tg3_close, - .ndo_start_xmit = tg3_start_xmit_dma_bug, - .ndo_get_stats = tg3_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = tg3_set_rx_mode, - .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, - .ndo_tx_timeout = tg3_tx_timeout, - .ndo_change_mtu = tg3_change_mtu, -#if TG3_VLAN_TAG_USED - .ndo_vlan_rx_register = tg3_vlan_rx_register, -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER - 
.ndo_poll_controller = tg3_poll_controller, -#endif -}; - static int __devinit tg3_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -14624,6 +14994,7 @@ u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; + u32 features = 0; printk_once(KERN_INFO "%s\n", version); @@ -14662,7 +15033,6 @@ #if TG3_VLAN_TAG_USED dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; #endif - tp = netdev_priv(dev); tp->pdev = pdev; tp->dev = dev; @@ -14707,13 +15077,12 @@ goto err_out_free_dev; } - tg3_init_link_config(tp); - tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; dev->ethtool_ops = &tg3_ethtool_ops; dev->watchdog_timeo = TG3_TX_TIMEOUT; + dev->netdev_ops = &tg3_netdev_ops; dev->irq = pdev->irq; err = tg3_get_invariants(tp); @@ -14723,23 +15092,15 @@ goto err_out_iounmap; } - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) - dev->netdev_ops = &tg3_netdev_ops; - else - dev->netdev_ops = &tg3_netdev_ops_dma_bug; - - /* The EPB bridge inside 5714, 5715, and 5780 and any * device behind the EPB cannot support DMA addresses > 40-bit. * On 64-bit systems with IOMMU, use 40-bit dma_mask. * On 64-bit systems without IOMMU, use 64-bit dma_mask and * do DMA address check in tg3_start_xmit(). */ - if (tp->tg3_flags2 & TG3_FLG2_IS_5788) + if (tg3_flag(tp, IS_5788)) persist_dma_mask = dma_mask = DMA_BIT_MASK(32); - else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { persist_dma_mask = dma_mask = DMA_BIT_MASK(40); #ifdef CONFIG_HIGHMEM dma_mask = DMA_BIT_MASK(64); @@ -14751,7 +15112,7 @@ if (dma_mask > DMA_BIT_MASK(32)) { err = pci_set_dma_mask(pdev, dma_mask); if (!err) { - dev->features |= NETIF_F_HIGHDMA; + features |= NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, persist_dma_mask); if (err < 0) { @@ -14772,48 +15133,53 @@ tg3_init_bufmgr_config(tp); - /* Selectively allow TSO based on operating conditions */ - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || - (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; - else { - tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); - tp->fw_needed = NULL; - } + features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) - tp->fw_needed = FIRMWARE_TG3; + /* 5700 B0 chips do not support checksumming correctly due + * to hardware bugs. + */ + if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { + features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_GRO; + + if (tg3_flag(tp, 5755_PLUS)) + features |= NETIF_F_IPV6_CSUM; + } /* TSO is on by default on chips that support hardware TSO. * Firmware TSO on older chips gives lower performance, so it * is off by default, but can be enabled using ethtool. 
*/ - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && - (dev->features & NETIF_F_IP_CSUM)) { - dev->features |= NETIF_F_TSO; + if ((tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) && + (features & NETIF_F_IP_CSUM)) { + features |= NETIF_F_TSO; vlan_features_add(dev, NETIF_F_TSO); } - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || - (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { - if (dev->features & NETIF_F_IPV6_CSUM) { - dev->features |= NETIF_F_TSO6; + + if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { + if (features & NETIF_F_IPV6_CSUM) { + features |= NETIF_F_TSO6; vlan_features_add(dev, NETIF_F_TSO6); } - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || + if (tg3_flag(tp, HW_TSO_3) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { - dev->features |= NETIF_F_TSO_ECN; + features |= NETIF_F_TSO_ECN; vlan_features_add(dev, NETIF_F_TSO_ECN); } } + dev->features |= features; + dev->vlan_features |= features; + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && - !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && + !tg3_flag(tp, TSO_CAPABLE) && !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { - tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; + tg3_flag_set(tp, MAX_RXPEND_64); tp->rx_pending = 63; } @@ -14824,7 +15190,7 @@ goto err_out_iounmap; } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (tg3_flag(tp, ENABLE_APE)) { tp->aperegs = pci_ioremap_bar(pdev, BAR_2); if (!tp->aperegs) { dev_err(&pdev->dev, @@ -14835,7 +15201,7 @@ tg3_ape_lock_init(tp); - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) + if (tg3_flag(tp, ENABLE_ASF)) tg3_read_dash_ver(tp); } @@ -14856,14 +15222,10 @@ goto err_out_apeunmap; } - /* flow control autonegotiation is default behavior */ - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; - tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; - intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; - for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { + for (i = 0; i < tp->irq_max; i++) { struct tg3_napi *tnapi = &tp->napi[i]; tnapi->tp = tp; @@ -14878,15 +15240,12 @@ tnapi->consmbox = rcvmbx; tnapi->prodmbox = sndmbx; - if (i) { + if (i) tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); - netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); - } else { + else tnapi->coal_now = HOSTCC_MODE_NOW; - netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); - } - if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) + if (!tg3_flag(tp, SUPPORT_MSIX)) break; /* @@ -14917,6 +15276,9 @@ goto err_out_apeunmap; } + /* RHEL: Set an initial value for operstate, after registration */ + netif_carrier_off(dev); + netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", tp->board_part_number, tp->pci_chip_rev_id, @@ -14940,21 +15302,25 @@ ethtype = "10/100/1000Base-T"; netdev_info(dev, "attached PHY is %s (%s Ethernet) " - "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype, - (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0); + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); } netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", - (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, - (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, + 
tg3_flag(tp, RX_CHECKSUMS) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, - (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", tp->dma_rwctrl, pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64); + pci_save_state(pdev); + return 0; err_out_apeunmap: @@ -14993,7 +15359,7 @@ flush_scheduled_work(); - if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { + if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } @@ -15014,19 +15380,14 @@ } } -static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int tg3_suspend(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - pci_power_t target_state; int err; - /* PCI register 4 needs to be saved whether netif_running() or not. - * MSI address and data need to be saved if using MSI and - * netif_running(). - */ - pci_save_state(pdev); - if (!netif_running(dev)) return 0; @@ -15044,18 +15405,16 @@ tg3_full_lock(tp, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; + tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); - target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot; - - err = tg3_set_power_state(tp, target_state); + err = tg3_power_down_prepare(tp); if (err) { int err2; tg3_full_lock(tp, 0); - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); err2 = tg3_restart_hw(tp, 1); if (err2) goto out; @@ -15076,26 +15435,21 @@ return err; } -static int tg3_resume(struct pci_dev *pdev) +static int tg3_resume(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); int err; - pci_restore_state(tp->pdev); - if (!netif_running(dev)) return 0; - err = tg3_set_power_state(tp, PCI_D0); - if (err) - return err; - netif_device_attach(dev); tg3_full_lock(tp, 0); - tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, 1); if (err) goto out; @@ -15114,13 +15468,166 @@ return err; } +static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +#define TG3_PM_OPS (&tg3_pm_ops) + +#else + +#define TG3_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +/** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
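/* A sketch of the dev_pm_ops wiring introduced above: with CONFIG_PM_SLEEP the
 * suspend/resume callbacks take a struct device *, SIMPLE_DEV_PM_OPS() packs
 * them into a struct dev_pm_ops, and the pci_driver points at it through
 * .driver.pm instead of the legacy .suspend/.resume hooks.  Everything named
 * example_* is hypothetical.
 */
#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	dev_dbg(&pdev->dev, "quiescing for system sleep\n");
	return 0;
}

static int example_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	dev_dbg(&pdev->dev, "reinitializing after system sleep\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.driver.pm	= &example_pm_ops,
	/* .id_table, .probe and .remove omitted from this sketch */
};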
+ */ +static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + tg3_flag_clear(tp, RESTART_TIMER); + + /* Want to make sure that the reset task doesn't run */ + cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + tg3_flag_clear(tp, RESTART_TIMER); + + netif_device_detach(netdev); + + /* Clean up software state, even if MMIO is blocked */ + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + tg3_full_unlock(tp); + +done: + if (state == pci_channel_io_perm_failure) + err = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return err; +} + +/** + * tg3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + int err; + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + goto done; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!netif_running(netdev)) { + rc = PCI_ERS_RESULT_RECOVERED; + goto done; + } + + err = tg3_power_up(tp); + if (err) { + netdev_err(netdev, "Failed to restore register access.\n"); + goto done; + } + + rc = PCI_ERS_RESULT_RECOVERED; + +done: + rtnl_unlock(); + + return rc; +} + +/** + * tg3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. 
+ */ +static void tg3_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + tg3_full_unlock(tp); + if (err) { + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + tg3_phy_start(tp); + +done: + rtnl_unlock(); +} + +static struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume +}; + static struct pci_driver tg3_driver = { .name = DRV_MODULE_NAME, .id_table = tg3_pci_tbl, .probe = tg3_init_one, .remove = __devexit_p(tg3_remove_one), - .suspend = tg3_suspend, - .resume = tg3_resume + .err_handler = &tg3_err_handler, + .driver.pm = TG3_PM_OPS, }; static int __init tg3_init(void) --- /build/BUILD/kernel-2.6.32-131.0.15.el6/linux-2.6.32-27.mlab.mlab.i686/drivers/net/tg3.h 2012-06-19 17:20:05.376166782 -0400 +++ linux-2.6.32-220.el6/drivers/net/tg3.h 2012-06-19 17:20:29.182150176 -0400 @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2010 Broadcom Corporation. + * Copyright (C) 2007-2011 Broadcom Corporation. */ #ifndef _T3_H @@ -12,6 +12,7 @@ #define TG3_64BIT_REG_HIGH 0x00UL #define TG3_64BIT_REG_LOW 0x04UL +#define PCI_VPD_RO_KEYWORD_CHKSUM "RV" /* Descriptor block info. */ #define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */ @@ -23,11 +24,13 @@ #define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ #define TG3_BDINFO_SIZE 0x10UL -#define TG3_RX_INTERNAL_RING_SZ_5906 32 - -#define RX_STD_MAX_SIZE_5705 512 -#define RX_STD_MAX_SIZE_5717 2048 -#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ +#define TG3_RX_STD_MAX_SIZE_5700 512 +#define TG3_RX_STD_MAX_SIZE_5717 2048 +#define TG3_RX_JMB_MAX_SIZE_5700 256 +#define TG3_RX_JMB_MAX_SIZE_5717 1024 +#define TG3_RX_RET_MAX_SIZE_5700 1024 +#define TG3_RX_RET_MAX_SIZE_5705 512 +#define TG3_RX_RET_MAX_SIZE_5717 4096 /* First 256 bytes are a mirror of PCI config space. 
*/ #define TG3PCI_VENDOR 0x00000000 @@ -47,7 +50,6 @@ #define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ #define TG3PCI_DEVICE_TIGON3_5717 0x1655 #define TG3PCI_DEVICE_TIGON3_5718 0x1656 -#define TG3PCI_DEVICE_TIGON3_5724 0x165c #define TG3PCI_DEVICE_TIGON3_57781 0x16b1 #define TG3PCI_DEVICE_TIGON3_57785 0x16b5 #define TG3PCI_DEVICE_TIGON3_57761 0x16b0 @@ -55,6 +57,7 @@ #define TG3PCI_DEVICE_TIGON3_57791 0x16b2 #define TG3PCI_DEVICE_TIGON3_57795 0x16b6 #define TG3PCI_DEVICE_TIGON3_5719 0x1657 +#define TG3PCI_DEVICE_TIGON3_5720 0x165f /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 @@ -142,6 +145,8 @@ #define CHIPREV_ID_57780_A1 0x57780001 #define CHIPREV_ID_5717_A0 0x05717000 #define CHIPREV_ID_57765_A0 0x57785000 +#define CHIPREV_ID_5719_A0 0x05719000 +#define CHIPREV_ID_5720_A0 0x05720000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -163,6 +168,7 @@ #define ASIC_REV_5717 0x5717 #define ASIC_REV_57765 0x57785 #define ASIC_REV_5719 0x5719 +#define ASIC_REV_5720 0x5720 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -175,6 +181,7 @@ #define CHIPREV_5750_BX 0x41 #define CHIPREV_5784_AX 0x57840 #define CHIPREV_5761_AX 0x57610 +#define CHIPREV_57765_AX 0x577650 #define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff) #define METAL_REV_A0 0x00 #define METAL_REV_A1 0x01 @@ -183,6 +190,7 @@ #define METAL_REV_B2 0x02 #define TG3PCI_DMA_RW_CTRL 0x0000006c #define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 +#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080 #define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 #define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 #define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 @@ -473,6 +481,8 @@ #define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 #define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 #define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 +#define TX_MODE_JMB_FRM_LEN 0x00400000 +#define TX_MODE_CNT_DN_MODE 0x00800000 #define MAC_TX_STATUS 0x00000460 #define TX_STATUS_XOFFED 0x00000001 #define TX_STATUS_SENT_XOFF 0x00000002 @@ -487,6 +497,8 @@ #define TX_LENGTHS_IPG_SHIFT 8 #define TX_LENGTHS_IPG_CRS_MASK 0x00003000 #define TX_LENGTHS_IPG_CRS_SHIFT 12 +#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000 +#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000 #define MAC_RX_MODE 0x00000468 #define RX_MODE_RESET 0x00000001 #define RX_MODE_ENABLE 0x00000002 @@ -1079,6 +1091,9 @@ #define CPMU_HST_ACC_MACCLK_6_25 0x00130000 /* 0x3620 --> 0x3630 unused */ +#define TG3_CPMU_CLCK_ORIDE 0x00003624 +#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 + #define TG3_CPMU_CLCK_STAT 0x00003630 #define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 #define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 @@ -1106,7 +1121,7 @@ #define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 #define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff #define TG3_CPMU_EEE_DBTMR2 0x000036b8 -#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 #define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff #define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc #define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 @@ -1188,6 +1203,7 @@ #define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40 #define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44 #define HOSTCC_FLOW_ATTN 0x00003c48 +#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040 /* 0x3c4c --> 0x3c50 unused */ #define HOSTCC_JUMBO_CON_IDX 0x00003c50 #define HOSTCC_STD_CON_IDX 0x00003c54 @@ -1321,6 
+1337,7 @@
 #define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
 #define RDMAC_MODE_IPV4_LSO_EN 0x08000000
 #define RDMAC_MODE_IPV6_LSO_EN 0x10000000
+#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000
 #define RDMAC_STATUS 0x00004804
 #define RDMAC_STATUS_TGTABORT 0x00000004
 #define RDMAC_STATUS_MSTABORT 0x00000008
@@ -1334,6 +1351,10 @@
 #define TG3_RDMA_RSRVCTRL_REG 0x00004900
 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
 /* 0x4904 --> 0x4910 unused */
@@ -1593,6 +1614,7 @@
 #define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020
 #define MSGINT_MODE_MULTIVEC_EN 0x00000080
 #define MSGINT_STATUS 0x00006004
+#define MSGINT_STATUS_MSI_REQ 0x00000001
 #define MSGINT_FIFO 0x00006008
 /* 0x600c --> 0x6400 unused */
@@ -1609,6 +1631,8 @@
 #define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
 #define GRC_MODE_BSWAP_DATA 0x00000010
 #define GRC_MODE_WSWAP_DATA 0x00000020
+#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040
+#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080
 #define GRC_MODE_SPLITHDR 0x00000100
 #define GRC_MODE_NOFRM_CRACKING 0x00000200
 #define GRC_MODE_INCL_CRC 0x00000400
@@ -1616,8 +1640,10 @@
 #define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
 #define GRC_MODE_NOIRQ_ON_RCV 0x00004000
 #define GRC_MODE_FORCE_PCI32BIT 0x00008000
+#define GRC_MODE_B2HRX_ENABLE 0x00008000
 #define GRC_MODE_HOST_STACKUP 0x00010000
 #define GRC_MODE_HOST_SENDBDS 0x00020000
+#define GRC_MODE_HTX2B_ENABLE 0x00040000
 #define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
 #define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
 #define GRC_MODE_PCIE_TL_SEL 0x00000000
@@ -1814,6 +1840,38 @@
 #define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000
 #define FLASH_5717VENDOR_ST_25USPT 0x03400002
 #define FLASH_5717VENDOR_ST_45USPT 0x03400001
+#define FLASH_5720_EEPROM_HD 0x00000001
+#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
+#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
+#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
+#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
+#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000
+#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002
+#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001
+#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003
+#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000
+#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002
+#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001
+#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003
+#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
+#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
+#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
+#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
+#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
+#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
+#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
+#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000
+#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002
+#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001
+#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003
+#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000
+#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002
+#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001
+#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003
+#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000
+#define FLASH_5720VENDOR_ST_25USPT 0x03c00002
+#define FLASH_5720VENDOR_ST_45USPT 0x03c00001
 #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
 #define FLASH_5752PAGE_SIZE_256 0x00000000
 #define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -1895,11 +1953,16 @@
 /* Alternate PCIE definitions */
 #define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c
+#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff
+#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c
 #define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
 #define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
 #define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
 #define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
+#define TG3_REG_BLK_SIZE 0x00008000
+
 /* OTP bit definitions */
 #define TG3_OTP_AGCTGT_MASK 0x000000e0
 #define TG3_OTP_AGCTGT_SHIFT 1
@@ -1943,6 +2006,7 @@
 #define TG3_EEPROM_SB_REVISION_3 0x00030000
 #define TG3_EEPROM_SB_REVISION_4 0x00040000
 #define TG3_EEPROM_SB_REVISION_5 0x00050000
+#define TG3_EEPROM_SB_REVISION_6 0x00060000
 #define TG3_EEPROM_MAGIC_HW 0xabcd
 #define TG3_EEPROM_MAGIC_HW_MSK 0xffff
@@ -1950,7 +2014,9 @@
 #define TG3_NVM_DIR_END 0x78
 #define TG3_NVM_DIRENT_SIZE 0xc
 #define TG3_NVM_DIRTYPE_SHIFT 24
+#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff
 #define TG3_NVM_DIRTYPE_ASFINI 1
+#define TG3_NVM_DIRTYPE_EXTVPD 20
 #define TG3_NVM_PTREV_BCVER 0x94
 #define TG3_NVM_BCVER_MAJMSK 0x0000ff00
 #define TG3_NVM_BCVER_MAJSFT 8
@@ -1962,6 +2028,7 @@
 #define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
 #define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
 #define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
+#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c
 #define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
 #define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
 #define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
@@ -2073,6 +2140,13 @@
 #define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
 #define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
+
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16
+
 /* Currently this is fixed. */
 #define TG3_PHY_MII_ADDR 0x01
@@ -2107,9 +2181,13 @@
 #define MII_TG3_DSP_TAP1 0x0001
 #define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_TAP26 0x001a
+#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
+#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
+#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
 #define MII_TG3_DSP_AADJ1CH0 0x001f
 #define MII_TG3_DSP_CH34TP2 0x4022
-#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
+#define MII_TG3_DSP_CH34TP2_HIBW01 0x017b
 #define MII_TG3_DSP_AADJ1CH3 0x601f
 #define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
 #define MII_TG3_DSP_EXP1_INT_STAT 0x0f01
@@ -2120,23 +2198,30 @@
 #define MII_TG3_DSP_EXP96 0x0f96
 #define MII_TG3_DSP_EXP97 0x0f97
-#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */
+#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
+
+#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000
+#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
+#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
 #define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
 #define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
+#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040
 #define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
-#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
-#define MII_TG3_AUXCTL_MISC_WREN 0x8000
-#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004
+
 #define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12
+#define MII_TG3_AUXCTL_MISC_WREN 0x8000
-#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
-#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
-#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
-#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */
+#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
 #define MII_TG3_AUX_STAT_LPASS 0x0004
 #define MII_TG3_AUX_STAT_SPDMASK 0x0700
 #define MII_TG3_AUX_STAT_10HALF 0x0100
@@ -2226,7 +2311,7 @@
 #define TG3_APE_HOST_SEG_SIG 0x4200
 #define APE_HOST_SEG_SIG_MAGIC 0x484f5354
 #define TG3_APE_HOST_SEG_LEN 0x4204
-#define APE_HOST_SEG_LEN_MAGIC 0x0000001c
+#define APE_HOST_SEG_LEN_MAGIC 0x00000020
 #define TG3_APE_HOST_INIT_COUNT 0x4208
 #define TG3_APE_HOST_DRIVER_ID 0x420c
 #define APE_HOST_DRIVER_ID_LINUX 0xf0000000
@@ -2238,6 +2323,12 @@
 #define APE_HOST_HEARTBEAT_INT_DISABLE 0
 #define APE_HOST_HEARTBEAT_INT_5SEC 5000
 #define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
+#define TG3_APE_HOST_DRVR_STATE 0x421c
+#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
+#define TG3_APE_HOST_WOL_SPEED 0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
 #define TG3_APE_EVENT_STATUS 0x4300
@@ -2548,7 +2639,12 @@
 tg3_stat64_t nic_avoided_irqs;
 tg3_stat64_t nic_tx_threshold_hit;
- u8 __reserved4[0xb00-0x9c0];
+ /* NOT a part of the hardware statistics block format.
+ * These stats are here as storage for tg3_periodic_fetch_stats().
+ */
+ tg3_stat64_t mbuf_lwm_thresh_hit;
+
+ u8 __reserved4[0xb00-0x9c8];
 };
 /* 'mapping' is superfluous as the chip does not write into
@@ -2684,6 +2780,8 @@
 u64 nic_irqs;
 u64 nic_avoided_irqs;
 u64 nic_tx_threshold_hit;
+
+ u64 mbuf_lwm_thresh_hit;
 };
 struct tg3_rx_prodring_set {
@@ -2699,13 +2797,15 @@
 dma_addr_t rx_jmb_mapping;
 };
-#define TG3_IRQ_MAX_VECS 5
+#define TG3_IRQ_MAX_VECS_RSS 5
+#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
 struct tg3_napi {
 struct napi_struct napi ____cacheline_aligned;
 struct tg3 *tp;
 struct tg3_hw_status *hw_status;
+ u32 chk_msi_cnt;
 u32 last_tag;
 u32 last_irq_tag;
 u32 int_mbox;
@@ -2713,12 +2813,14 @@
 u32 tx_prod;
 u32 tx_cons;
 u32 tx_pending;
+ u32 last_tx_cons;
 u32 prodmbox;
 u32 consmbox;
 u32 rx_rcb_ptr;
+ u32 last_rx_cons;
 u16 *rx_rcb_prod_idx;
- struct tg3_rx_prodring_set *prodring;
+ struct tg3_rx_prodring_set prodring;
 struct tg3_rx_buffer_desc *rx_rcb;
 struct tg3_tx_buffer_desc *tx_ring;
@@ -2732,6 +2834,87 @@
 unsigned int irq_vec;
 };
+enum TG3_FLAGS {
+ TG3_FLAG_TAGGED_STATUS = 0,
+ TG3_FLAG_TXD_MBOX_HWBUG,
+ TG3_FLAG_USE_LINKCHG_REG,
+ TG3_FLAG_ERROR_PROCESSED,
+ TG3_FLAG_ENABLE_ASF,
+ TG3_FLAG_ASPM_WORKAROUND,
+ TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_MBOX_WRITE_REORDER,
+ TG3_FLAG_PCIX_TARGET_HWBUG,
+ TG3_FLAG_WOL_SPEED_100MB,
+ TG3_FLAG_WOL_ENABLE,
+ TG3_FLAG_EEPROM_WRITE_PROT,
+ TG3_FLAG_NVRAM,
+ TG3_FLAG_NVRAM_BUFFERED,
+ TG3_FLAG_SUPPORT_MSI,
+ TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_PCIX_MODE,
+ TG3_FLAG_PCI_HIGH_SPEED,
+ TG3_FLAG_PCI_32BIT,
+ TG3_FLAG_SRAM_USE_CONFIG,
+ TG3_FLAG_TX_RECOVERY_PENDING,
+ TG3_FLAG_WOL_CAP,
+ TG3_FLAG_JUMBO_RING_ENABLE,
+ TG3_FLAG_PAUSE_AUTONEG,
+ TG3_FLAG_CPMU_PRESENT,
+ TG3_FLAG_40BIT_DMA_BUG,
+ TG3_FLAG_BROKEN_CHECKSUMS,
+ TG3_FLAG_JUMBO_CAPABLE,
+ TG3_FLAG_CHIP_RESETTING,
+ TG3_FLAG_INIT_COMPLETE,
+ TG3_FLAG_RESTART_TIMER,
+ TG3_FLAG_TSO_BUG,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_MAX_RXPEND_64,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_PCI_EXPRESS,
+ TG3_FLAG_ASF_NEW_HANDSHAKE,
+ TG3_FLAG_HW_AUTONEG,
+ TG3_FLAG_IS_NIC,
+ TG3_FLAG_FLASH,
+ TG3_FLAG_HW_TSO_1,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
+ TG3_FLAG_ICH_WORKAROUND,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_HW_TSO_2,
+ TG3_FLAG_1SHOT_MSI,
+ TG3_FLAG_NO_FWARE_REPORTED,
+ TG3_FLAG_NO_NVRAM_ADDR_TRANS,
+ TG3_FLAG_ENABLE_APE,
+ TG3_FLAG_PROTECTED_NVRAM,
+ TG3_FLAG_5701_DMA_BUG,
+ TG3_FLAG_USE_PHYLIB,
+ TG3_FLAG_MDIOBUS_INITED,
+ TG3_FLAG_LRG_PROD_RING_CAP,
+ TG3_FLAG_RGMII_INBAND_DISABLE,
+ TG3_FLAG_RGMII_EXT_IBND_RX_EN,
+ TG3_FLAG_RGMII_EXT_IBND_TX_EN,
+ TG3_FLAG_CLKREQ_BUG,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_NO_NVRAM,
+ TG3_FLAG_ENABLE_RSS,
+ TG3_FLAG_ENABLE_TSS,
+ TG3_FLAG_4G_DMA_BNDRY_BUG,
+ TG3_FLAG_40BIT_DMA_LIMIT_BUG,
+ TG3_FLAG_SHORT_DMA_BUG,
+ TG3_FLAG_USE_JUMBO_BDFLAG,
+ TG3_FLAG_L1PLLPD_EN,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_APE_HAS_NCSI,
+ TG3_FLAG_5717_PLUS,
+ TG3_FLAG_RX_CHECKSUMS,
+
+ /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
+};
+
 struct tg3 {
 /* begin "general, frequently-used members" cacheline section */
@@ -2755,7 +2938,7 @@
 /* SMP locking strategy:
 *
 * lock: Held during reset, PHY access, timer, and when
- * updating tg3_flags and tg3_flags2.
+ * updating tg3_flags.
 *
 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
 * netif_tx_lock when it needs to call
@@ -2808,8 +2991,6 @@
 struct vlan_group *vlgrp;
 #endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
-
 /* begin "everything else" cacheline(s) section */
 struct net_device_stats net_stats;
@@ -2817,93 +2998,13 @@
 struct tg3_ethtool_stats estats;
 struct tg3_ethtool_stats estats_prev;
+ DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
+
 union {
 unsigned long phy_crc_errors;
 unsigned long last_event_jiffies;
 };
- u32 tg3_flags;
-#define TG3_FLAG_TAGGED_STATUS 0x00000001
-#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
-#define TG3_FLAG_RX_CHECKSUMS 0x00000004
-#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
-#define TG3_FLAG_ENABLE_ASF 0x00000020
-#define TG3_FLAG_ASPM_WORKAROUND 0x00000040
-#define TG3_FLAG_POLL_SERDES 0x00000080
-#define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100
-#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
-#define TG3_FLAG_WOL_SPEED_100MB 0x00000400
-#define TG3_FLAG_WOL_ENABLE 0x00000800
-#define TG3_FLAG_EEPROM_WRITE_PROT 0x00001000
-#define TG3_FLAG_NVRAM 0x00002000
-#define TG3_FLAG_NVRAM_BUFFERED 0x00004000
-#define TG3_FLAG_SUPPORT_MSI 0x00008000
-#define TG3_FLAG_SUPPORT_MSIX 0x00010000
-#define TG3_FLAG_SUPPORT_MSI_OR_MSIX (TG3_FLAG_SUPPORT_MSI | \
- TG3_FLAG_SUPPORT_MSIX)
-#define TG3_FLAG_PCIX_MODE 0x00020000
-#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000
-#define TG3_FLAG_PCI_32BIT 0x00080000
-#define TG3_FLAG_SRAM_USE_CONFIG 0x00100000
-#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000
-#define TG3_FLAG_WOL_CAP 0x00400000
-#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
-#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
-#define TG3_FLAG_CPMU_PRESENT 0x04000000
-#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
-#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
-#define TG3_FLAG_JUMBO_CAPABLE 0x20000000
-#define TG3_FLAG_CHIP_RESETTING 0x40000000
-#define TG3_FLAG_INIT_COMPLETE 0x80000000
- u32 tg3_flags2;
-#define TG3_FLG2_RESTART_TIMER 0x00000001
-#define TG3_FLG2_TSO_BUG 0x00000002
-#define TG3_FLG2_IS_5788 0x00000008
-#define TG3_FLG2_MAX_RXPEND_64 0x00000010
-#define TG3_FLG2_TSO_CAPABLE 0x00000020
-#define TG3_FLG2_PCI_EXPRESS 0x00000200
-#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400
-#define TG3_FLG2_HW_AUTONEG 0x00000800
-#define TG3_FLG2_IS_NIC 0x00001000
-#define TG3_FLG2_FLASH 0x00008000
-#define TG3_FLG2_HW_TSO_1 0x00010000
-#define TG3_FLG2_5705_PLUS 0x00040000
-#define TG3_FLG2_5750_PLUS 0x00080000
-#define TG3_FLG2_HW_TSO_3 0x00100000
-#define TG3_FLG2_USING_MSI 0x00200000
-#define TG3_FLG2_USING_MSIX 0x00400000
-#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
- TG3_FLG2_USING_MSIX)
-#define TG3_FLG2_ICH_WORKAROUND 0x02000000
-#define TG3_FLG2_5780_CLASS 0x04000000
-#define TG3_FLG2_HW_TSO_2 0x08000000
-#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
- TG3_FLG2_HW_TSO_2 | \
- TG3_FLG2_HW_TSO_3)
-#define TG3_FLG2_1SHOT_MSI 0x10000000
-#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
- u32 tg3_flags3;
-#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
-#define TG3_FLG3_ENABLE_APE 0x00000002
-#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
-#define TG3_FLG3_5701_DMA_BUG 0x00000008
-#define TG3_FLG3_USE_PHYLIB 0x00000010
-#define TG3_FLG3_MDIOBUS_INITED 0x00000020
-#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
-#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
-#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
-#define TG3_FLG3_CLKREQ_BUG 0x00000800
-#define TG3_FLG3_5755_PLUS 0x00002000
-#define TG3_FLG3_NO_NVRAM 0x00004000
-#define TG3_FLG3_ENABLE_RSS 0x00020000
-#define TG3_FLG3_ENABLE_TSS 0x00040000
-#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
-#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
-#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
-#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
-#define TG3_FLG3_L1PLLPD_EN 0x00800000
-#define TG3_FLG3_5717_PLUS 0x01000000
-
 struct timer_list timer;
 u16 timer_counter;
 u16 timer_multiplier;
@@ -2974,6 +3075,7 @@
 #define TG3_PHY_ID_BCM5718S 0xbc050ff0
 #define TG3_PHY_ID_BCM57765 0x5c0d8a40
 #define TG3_PHY_ID_BCM5719C 0x5c0d8a20
+#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
 #define TG3_PHY_ID_BCM5906 0xdc00ac40
 #define TG3_PHY_ID_BCM8002 0x60010140
 #define TG3_PHY_ID_INVALID 0xffffffff
@@ -3040,6 +3142,7 @@
 int nvram_lock_cnt;
 u32 nvram_size;
+#define TG3_NVRAM_SIZE_2KB 0x00000800
 #define TG3_NVRAM_SIZE_64KB 0x00010000
 #define TG3_NVRAM_SIZE_128KB 0x00020000
 #define TG3_NVRAM_SIZE_256KB 0x00040000
@@ -3055,6 +3158,9 @@
 #define JEDEC_SAIFUN 0x4f
 #define JEDEC_SST 0xbf
+#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
+#define ATMEL_AT24C02_PAGE_SIZE (8)
+
 #define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
 #define ATMEL_AT24C64_PAGE_SIZE (32)