* notice is accompanying it.
*/
+#include <linux/config.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < 0x020500)
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#endif
#include <linux/module.h>
+#if (LINUX_VERSION_CODE >= 0x20600)
#include <linux/moduleparam.h>
+#endif
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#if (LINUX_VERSION_CODE >= 0x20600)
#include <linux/workqueue.h>
+#endif
#include <linux/prefetch.h>
+#if (LINUX_VERSION_CODE >= 0x020600)
#include <linux/dma-mapping.h>
+#endif
+#include <linux/bitops.h>
#include <net/checksum.h>
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.65"
-#define DRV_MODULE_RELDATE "August 07, 2006"
+#define DRV_MODULE_VERSION "3.66f"
+#define DRV_MODULE_RELDATE "September 1, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
MODULE_VERSION(DRV_MODULE_VERSION);
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
+#if (LINUX_VERSION_CODE >= 0x20600)
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+#endif
static struct pci_device_id tg3_pci_tbl[] = {
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
(tp->hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ else
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
static void tg3_enable_ints(struct tg3 *tp)
static void tg3_power_down_phy(struct tg3 *tp)
{
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ return;
+
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
+
/* The PHY should not be powered down on some chips because
* of bugs.
*/
tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
}
}
tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
udelay(40);
- mac_mode = MAC_MODE_PORT_MODE_MII;
+ if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+ mac_mode = MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode = MAC_MODE_PORT_MODE_MII;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
}
if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+ !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
/* Turn off the PHY */
- if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- MII_TG3_EXT_CTRL_FORCE_LED_OFF);
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
- tg3_power_down_phy(tp);
- }
- }
+ tg3_power_down_phy(tp);
tg3_frob_aux_power(tp);
expected_sg_dig_ctrl |= (1 << 12);
if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+restart_autoneg:
if (workaround)
tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
!(mac_status & MAC_STATUS_RCVD_CFG)) {
tg3_setup_flow_control(tp, 0, 0);
current_link_up = 1;
- }
+ } else
+ goto restart_autoneg;
}
}
}
tp->rx_rcb_ptr = sw_idx;
tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Refill RX ring(s). */
if (work_mask & RXD_OPAQUE_RING_STD) {
sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
tp->irq_sync = 1;
smp_mb();
+#if (LINUX_VERSION_CODE >= 0x2051c)
synchronize_irq(tp->pdev->irq);
+#else
+ synchronize_irq();
+#endif
}
static inline int tg3_irq_sync(struct tg3 *tp)
return err;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void tg3_poll_controller(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
+ if (netdump_mode) {
+ /* NOTE(review): netdump path on RH 2.4 kernels — run the ISR and,
+  * if the device put itself on the poll list, drain up to 64
+  * packets synchronously (presumably because the normal softirq
+  * poll machinery is not running in netdump context — confirm). */
+ tg3_interrupt(tp->pdev->irq, dev, NULL);
+ if (dev->poll_list.prev) {
+ int budget = 64;
+
+ tg3_poll(dev, &budget);
+ }
+ }
+ else
+#endif
tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
goto out_unlock;
}
+#ifdef NETIF_F_GSO
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
- else {
+ else
+#endif
+ {
tcp_opt_len = ((skb->h.th->doff - 5) * 4);
ip_tcp_len = (skb->nh.iph->ihl * 4) +
sizeof(struct tcphdr);
}
}
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
netif_wake_queue(tp->dev);
}
+#if TG3_TSO_SUPPORT != 0
out_unlock:
+#endif
mmiowb();
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
+
#if TG3_TSO_SUPPORT != 0
+#ifdef NETIF_F_GSO
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
/* Use GSO to workaround a rare TSO bug that may be triggered when the
return NETDEV_TX_OK;
}
#endif
+#endif
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
* support TG3_FLG2_HW_TSO_1 or firmware TSO only.
ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
hdr_len = ip_tcp_len + tcp_opt_len;
+#ifdef NETIF_F_GSO
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
return (tg3_tso_bug(tp, skb));
+#endif
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
entry = start;
}
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
if (new_mtu > ETH_DATA_LEN) {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
+#if TG3_TSO_SUPPORT != 0
ethtool_op_set_tso(dev, 0);
+#endif
}
else
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
}
}
+/* Poll NIC_SRAM_FIRMWARE_MBOX for the bootcode-ready magic value
+ * (up to 100000 iterations, 10us apart).  Always returns 0: a
+ * timeout is not an error — see the comment below — it is merely
+ * reported once via TG3_FLG2_NO_FWARE_REPORTED. */
+static int tg3_poll_fw(struct tg3 *tp)
+{
+ int i;
+ u32 val;
+
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+
+ /* Chip might not be fitted with firmware. Some Sun onboard
+ * parts are configured like that. So don't signal the timeout
+ * of the above loop as an error, but do report the lack of
+ * running firmware once.
+ */
+ if (i >= 100000 &&
+ !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
+ tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+
+ printk(KERN_INFO PFX "%s: No firmware running.\n",
+ tp->dev->name);
+ }
+
+ return 0;
+}
+
static void tg3_stop_fw(struct tg3 *);
/* tp->lock is held. */
{
u32 val;
void (*write_op)(struct tg3 *, u32, u32);
- int i;
+ int err;
tg3_nvram_lock(tp);
val |= PCISTATE_RETRY_SAME_DMA;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_restore_state(tp->pdev, tp->pci_cfg_state);
+#else
pci_restore_state(tp->pdev);
+#endif
/* Make sure PCI-X relaxed ordering bit is clear. */
pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
tw32_f(MAC_MODE, 0);
udelay(40);
- /* Wait for firmware initialization to complete. */
- for (i = 0; i < 100000; i++) {
- tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
- if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
- break;
- udelay(10);
- }
-
- /* Chip might not be fitted with firmare. Some Sun onboard
- * parts are configured like that. So don't signal the timeout
- * of the above loop as an error, but do report the lack of
- * running firmware once.
- */
- if (i >= 100000 &&
- !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
- tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
-
- printk(KERN_INFO PFX "%s: No firmware running.\n",
- tp->dev->name);
- }
+ err = tg3_poll_fw(tp);
+ if (err)
+ return err;
if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
u32 val;
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
- FWCMD_NICDRV_ALIVE2);
+ FWCMD_NICDRV_ALIVE_DETECT);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
/* 5 seconds timeout */
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
TG3_64BIT_REG_LOW);
if (int_mbox != 0)
break;
+
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+#else
msleep(10);
+#endif
}
tg3_disable_ints(tp);
tp->dev->name);
free_irq(tp->pdev->irq, dev);
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
tg3_full_lock(tp, 0);
err = tg3_set_power_state(tp, PCI_D0);
- if (err) {
- tg3_full_unlock(tp);
+ if (err)
return err;
- }
tg3_disable_ints(tp);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
if (err)
return err;
+#ifdef CONFIG_PCI_MSI
if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
(GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
(GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
}
}
+#endif
err = tg3_request_irq(tp);
if (err) {
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_free_consistent(tp);
if (err) {
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_free_consistent(tp);
tg3_full_lock(tp, 0);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
* linkwatch_event() may be on the workqueue and it will try to get
* the rtnl_lock which we are holding.
*/
- while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
+ while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK) {
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
+ }
netif_stop_queue(dev);
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_full_unlock(tp);
}
+#if (LINUX_VERSION_CODE >= 0x20418)
static int tg3_get_eeprom_len(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
return tp->nvram_size;
}
+#endif
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
+#ifdef ETHTOOL_GEEPROM
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
}
return 0;
}
+#endif
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+#ifdef ETHTOOL_SEEPROM
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
return ret;
}
+#endif
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if ((wol->wolopts & WAKE_MAGIC) &&
- tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
+ tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
!(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
return -EINVAL;
return 0;
}
+#if (LINUX_VERSION_CODE >= 0x20418)
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+#if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
+ tg3_set_tx_hw_csum(dev, data);
+#else
ethtool_op_set_tx_hw_csum(dev, data);
+#endif
else
ethtool_op_set_tx_csum(dev, data);
return 0;
}
+#endif
static int tg3_get_stats_count (struct net_device *dev)
{
else
tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
LED_CTRL_TRAFFIC_OVERRIDE);
-
+#if (LINUX_VERSION_CODE < 0x20609)
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 2))
+#else
if (msleep_interruptible(500))
+#endif
break;
}
tw32(MAC_LED_CTRL, tp->led_ctrl);
goto out;
/* Selfboot format */
- if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
+ if ((cpu_to_be32(buf[0]) & 0xff000000) == 0xa5000000) {
u8 *buf8 = (u8 *) buf, csum8 = 0;
for (i = 0; i < size; i++)
}
#define TG3_SERDES_TIMEOUT_SEC 2
-#define TG3_COPPER_TIMEOUT_SEC 6
+#define TG3_COPPER_TIMEOUT_SEC 7
static int tg3_test_link(struct tg3 *tp)
{
if (netif_carrier_ok(tp->dev))
return 0;
+#if (LINUX_VERSION_CODE < 0x20609)
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ))
+#else
if (msleep_interruptible(1000))
+#endif
break;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
- int i, is_5705;
+ int i, is_5705, is_5750;
u32 offset, read_mask, write_mask, val, save_val, read_val;
static struct {
u16 offset;
#define TG3_FL_5705 0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
+#define TG3_FL_NOT_5750 0x8
u32 read_mask;
u32 write_mask;
} reg_tbl[] = {
0xffffffff, 0x00000000 },
/* Buffer Manager Control Registers. */
- { BUFMGR_MB_POOL_ADDR, 0x0000,
+ { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
0x00000000, 0x007fff80 },
- { BUFMGR_MB_POOL_SIZE, 0x0000,
+ { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
0x00000000, 0x007fffff },
{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
0x00000000, 0x0000003f },
{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
};
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ is_5705 = 0;
+ is_5750 = 0;
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
is_5705 = 1;
- else
- is_5705 = 0;
+ if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+ is_5750 = 1;
+ }
for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
(reg_tbl[i].flags & TG3_FL_NOT_5788))
continue;
+ if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+ continue;
+
offset = (u32) reg_tbl[i].offset;
read_mask = reg_tbl[i].read_mask;
write_mask = reg_tbl[i].write_mask;
return 0;
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
- MAC_MODE_PORT_MODE_GMII;
+ MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
+ if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
tw32(MAC_MODE, mac_mode);
} else if (loopback_mode == TG3_PHY_LOOPBACK) {
- tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
- BMCR_SPEED1000);
+ u32 val;
+
+ val = BMCR_LOOPBACK | BMCR_FULLDPLX;
+ if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+ val |= BMCR_SPEED100;
+ else
+ val |= BMCR_SPEED1000;
+
+ tg3_writephy(tp, MII_BMCR, val);
udelay(40);
+
/* reset to prevent losing 1st rx packet intermittently */
if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
tw32_f(MAC_RX_MODE, RX_MODE_RESET);
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
+ MAC_MODE_LINK_POLARITY;
+ if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
mac_mode &= ~MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
tp->tx_prod++;
num_pkts++;
+ /* Some platforms need to sync memory here */
+ wmb();
+
tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
tp->tx_prod);
tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
udelay(10);
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 50; i++) {
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
HOSTCC_MODE_NOW);
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+#if (LINUX_VERSION_CODE >= 0x020607)
struct mii_ioctl_data *data = if_mii(ifr);
+#else
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
+#endif
struct tg3 *tp = netdev_priv(dev);
int err;
.set_msglevel = tg3_set_msglevel,
.nway_reset = tg3_nway_reset,
.get_link = ethtool_op_get_link,
+#if (LINUX_VERSION_CODE >= 0x20418)
.get_eeprom_len = tg3_get_eeprom_len,
+#endif
+#ifdef ETHTOOL_GEEPROM
.get_eeprom = tg3_get_eeprom,
+#endif
+#ifdef ETHTOOL_SEEPROM
.set_eeprom = tg3_set_eeprom,
+#endif
.get_ringparam = tg3_get_ringparam,
.set_ringparam = tg3_set_ringparam,
.get_pauseparam = tg3_get_pauseparam,
.get_rx_csum = tg3_get_rx_csum,
.set_rx_csum = tg3_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
+#if (LINUX_VERSION_CODE >= 0x20418)
.set_tx_csum = tg3_set_tx_csum,
+#endif
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
.get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
+#ifdef ETHTOOL_GPERMADDR
.get_perm_addr = ethtool_op_get_perm_addr,
+#endif
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
/* Make sure register accesses (indirect or otherwise)
* will function correctly.
PCI_VPD_ADDR, &tmp16);
if (tmp16 & 0x8000)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
}
if (!(tmp16 & 0x8000))
goto out_not_found;
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
+#if (LINUX_VERSION_CODE >= 0x2060a)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
PCI_DEVICE_ID_VIA_8385_0) },
{ },
};
+#endif
u32 misc_ctrl_reg;
u32 cacheline_sz_reg;
u32 pci_state_reg, grc_misc_cfg;
* every mailbox register write to force the writes to be
* posted to the chip in order.
*/
+#if (LINUX_VERSION_CODE < 0x2060a)
+ if ((pci_find_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
+ pci_find_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
+ pci_find_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
+#else
if (pci_dev_present(write_reorder_chipsets) &&
+#endif
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
#endif
return -EINVAL;
}
+#ifdef ETHTOOL_GPERMADDR
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+#endif
return 0;
}
}
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
+#if (LINUX_VERSION_CODE >= 0x2060a)
static struct pci_device_id dma_wait_state_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
{ },
};
+#endif
/* DMA test passed without adjusting DMA boundary,
* now look for chipsets that are known to expose the
* DMA bug without failing the test.
*/
- if (pci_dev_present(dma_wait_state_chipsets)) {
+#if (LINUX_VERSION_CODE < 0x2060a)
+ if (pci_find_device(PCI_VENDOR_ID_APPLE,
+ PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
+#else
+ if (pci_dev_present(dma_wait_state_chipsets))
+#endif
+ {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
}
}
SET_MODULE_OWNER(dev);
+#if (LINUX_VERSION_CODE >= 0x20419)
SET_NETDEV_DEV(dev, &pdev->dev);
+#endif
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->change_mtu = tg3_change_mtu;
dev->irq = pdev->irq;
-#ifdef CONFIG_NET_POLL_CONTROLLER
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
dev->poll_controller = tg3_poll_controller;
#endif
*/
if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_save_state(tp->pdev, tp->pci_cfg_state);
+#else
pci_save_state(tp->pdev);
+#endif
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
}
* of the PCI config space. We need to restore this after
* GRC_MISC_CFG core clock resets and some resume events.
*/
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_save_state(tp->pdev, tp->pci_cfg_state);
+#else
pci_save_state(tp->pdev);
+#endif
err = register_netdev(dev);
if (err) {
}
err_out_free_dev:
+#if (LINUX_VERSION_CODE >= 0x20418)
free_netdev(dev);
+#else
+ kfree(dev);
+#endif
err_out_free_res:
pci_release_regions(pdev);
if (dev) {
struct tg3 *tp = netdev_priv(dev);
+#if (LINUX_VERSION_CODE >= 0x20600)
flush_scheduled_work();
+#endif
unregister_netdev(dev);
if (tp->regs) {
iounmap(tp->regs);
tp->regs = NULL;
}
+#if (LINUX_VERSION_CODE >= 0x20418)
free_netdev(dev);
+#else
+ kfree(dev);
+#endif
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
+#if (LINUX_VERSION_CODE < 0x2060b)
+static int tg3_suspend(struct pci_dev *pdev, u32 state)
+#else
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
if (!netif_running(dev))
return 0;
+#if (LINUX_VERSION_CODE >= 0x20600)
flush_scheduled_work();
+#endif
tg3_netif_stop(tp);
del_timer_sync(&tp->timer);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
+#if (LINUX_VERSION_CODE < 0x2060b)
+ err = tg3_set_power_state(tp, state);
+#else
err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
+#endif
if (err) {
tg3_full_lock(tp, 0);
if (!netif_running(dev))
return 0;
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_restore_state(tp->pdev, tp->pci_cfg_state);
+#else
pci_restore_state(tp->pdev);
+#endif
err = tg3_set_power_state(tp, PCI_D0);
if (err)