*******************************************************************************/
#include "e1000.h"
-#include <linux/rtnetlink.h>
/* Change Log
+ * 5.3.12 6/7/04
+ * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
+ * - if_mii support and associated kcompat for older kernels
+ * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
+ * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
*
- * 5.2.51 5/14/04
- * o set default configuration to 'NAPI disabled'. NAPI enabled driver
- * causes kernel panic when the interface is shutdown while data is being
- * transferred.
- * 5.2.47 5/04/04
- * o fixed ethtool -t implementation
- * 5.2.45 4/29/04
- * o fixed ethtool -e implementation
- * o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)]
- * 5.2.42 4/26/04
- * o Added support for the DPRINTK macro for enhanced error logging. Some
- * parts of the patch were supplied by Jon Mason.
- * o Move the register_netdevice() donw in the probe routine due to a
- * loading/unloading test issue.
- * o Added a long RX byte count the the extra ethtool data members for BER
- * testing purposes.
- * 5.2.39 3/12/04
+ * 5.3.11 6/4/04
+ * - ethtool register dump reads MANC register conditionally.
+ *
+ * 5.3.10 6/1/04
*/
char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-char e1000_driver_version[] = "5.2.52-k4";
+#ifndef CONFIG_E1000_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+char e1000_driver_version[] = "5.3.19-k2"DRIVERNAPI;
char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
*
- * Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, private data (not used) }
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
static struct pci_device_id e1000_pci_tbl[] = {
- {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ INTEL_E1000_ETHERNET_DEVICE(0x1000),
+ INTEL_E1000_ETHERNET_DEVICE(0x1001),
+ INTEL_E1000_ETHERNET_DEVICE(0x1004),
+ INTEL_E1000_ETHERNET_DEVICE(0x1008),
+ INTEL_E1000_ETHERNET_DEVICE(0x1009),
+ INTEL_E1000_ETHERNET_DEVICE(0x100C),
+ INTEL_E1000_ETHERNET_DEVICE(0x100D),
+ INTEL_E1000_ETHERNET_DEVICE(0x100E),
+ INTEL_E1000_ETHERNET_DEVICE(0x100F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1010),
+ INTEL_E1000_ETHERNET_DEVICE(0x1011),
+ INTEL_E1000_ETHERNET_DEVICE(0x1012),
+ INTEL_E1000_ETHERNET_DEVICE(0x1013),
+ INTEL_E1000_ETHERNET_DEVICE(0x1015),
+ INTEL_E1000_ETHERNET_DEVICE(0x1016),
+ INTEL_E1000_ETHERNET_DEVICE(0x1017),
+ INTEL_E1000_ETHERNET_DEVICE(0x1018),
+ INTEL_E1000_ETHERNET_DEVICE(0x1019),
+ INTEL_E1000_ETHERNET_DEVICE(0x101D),
+ INTEL_E1000_ETHERNET_DEVICE(0x101E),
+ INTEL_E1000_ETHERNET_DEVICE(0x1026),
+ INTEL_E1000_ETHERNET_DEVICE(0x1027),
+ INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1075),
+ INTEL_E1000_ETHERNET_DEVICE(0x1076),
+ INTEL_E1000_ETHERNET_DEVICE(0x1077),
+ INTEL_E1000_ETHERNET_DEVICE(0x1078),
+ INTEL_E1000_ETHERNET_DEVICE(0x1079),
+ INTEL_E1000_ETHERNET_DEVICE(0x107A),
+ INTEL_E1000_ETHERNET_DEVICE(0x107B),
+ INTEL_E1000_ETHERNET_DEVICE(0x107C),
/* required last entry */
{0,}
};
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
-static inline void e1000_irq_disable(struct e1000_adapter *adapter);
-static inline void e1000_irq_enable(struct e1000_adapter *adapter);
+static void e1000_irq_disable(struct e1000_adapter *adapter);
+static void e1000_irq_enable(struct e1000_adapter *adapter);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_NAPI
void set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
-static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
- struct e1000_rx_desc *rx_desc,
- struct sk_buff *skb);
+static void e1000_rx_checksum(struct e1000_adapter *adapter,
+ struct e1000_rx_desc *rx_desc,
+ struct sk_buff *skb);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
-static void e1000_netpoll (struct net_device *dev);
+static void e1000_netpoll (struct net_device *netdev);
#endif
struct notifier_block e1000_notifier_reboot = {
extern void e1000_check_options(struct e1000_adapter *adapter);
-
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
-static int debug = 3;
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* hardware has been reset, we need to reload some things */
+ /* Reset the PHY if it was previously powered down */
+ if(adapter->hw.media_type == e1000_media_type_copper) {
+ uint16_t mii_reg;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ if(mii_reg & MII_CR_POWER_DOWN)
+ e1000_phy_reset(&adapter->hw);
+ }
+
e1000_set_multi(netdev);
e1000_restore_vlan(adapter);
e1000_reset(adapter);
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
+
+ /* If WoL is not enabled
+ * Power down the PHY so no link is implied when interface is down */
+ if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
+ uint16_t mii_reg;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ }
}
void
E1000_WRITE_REG(&adapter->hw, PBA, pba);
/* flow control settings */
- adapter->hw.fc_high_water =
- (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_HIGH_DIFF;
- adapter->hw.fc_low_water =
- (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_LOW_DIFF;
+ adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
+ E1000_FC_HIGH_DIFF;
+ adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
+ E1000_FC_LOW_DIFF;
adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
adapter->hw.fc_send_xon = 1;
adapter->hw.fc = adapter->hw.original_fc;
e1000_reset_hw(&adapter->hw);
if(adapter->hw.mac_type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, WUC, 0);
- e1000_init_hw(&adapter->hw);
+ if(e1000_init_hw(&adapter->hw))
+ DPRINTK(PROBE, ERR, "Hardware Error\n");
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
adapter->msg_enable = (1 << debug) - 1;
rtnl_lock();
- /* we need to set the name early since the DPRINTK macro needs it set */
- if (dev_alloc_name(netdev, netdev->name) < 0)
+ /* we need to set the name early for the DPRINTK macro */
+ if(dev_alloc_name(netdev, netdev->name) < 0)
goto err_free_unlock;
mmio_start = pci_resource_start(pdev, BAR_0);
}
#ifdef NETIF_F_TSO
-#ifdef BROKEN_ON_NON_IA_ARCHS
/* Disbaled for now until root-cause is found for
* hangs reported against non-IA archs. TSO can be
* enabled using ethtool -K eth<x> tso on */
(adapter->hw.mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
#endif
-#endif
-
if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ /* hard_start_xmit is safe against parallel locking */
+ netdev->features |= NETIF_F_LLTX;
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
/* before reading the EEPROM, reset the controller to
/* copy the MAC address out of the EEPROM */
- e1000_read_mac_addr(&adapter->hw);
+ if (e1000_read_mac_addr(&adapter->hw))
+ DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
if(!is_valid_ether_addr(netdev->dev_addr)) {
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
adapter->wol |= E1000_WUFC_MAG;
/* reset the hardware with the new settings */
-
e1000_reset(adapter);
- /* since we are holding the rtnl lock already, call the no-lock version */
+ /* We're already holding the rtnl lock; call the no-lock version */
if((err = register_netdevice(netdev)))
goto err_register;
/* identify the MAC */
- if (e1000_set_mac_type(hw)) {
+ if(e1000_set_mac_type(hw)) {
DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
return -EIO;
}
e1000_init_eeprom_params(hw);
- if((hw->mac_type == e1000_82541) ||
- (hw->mac_type == e1000_82547) ||
- (hw->mac_type == e1000_82541_rev_2) ||
- (hw->mac_type == e1000_82547_rev_2))
+ switch(hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
hw->phy_init_script = 1;
+ break;
+ }
e1000_set_media_type(hw);
- if(hw->mac_type < e1000_82543)
- hw->report_tx_early = 0;
- else
- hw->report_tx_early = 1;
-
hw->wait_autoneg_complete = FALSE;
hw->tbi_compatibility_en = TRUE;
hw->adaptive_ifs = TRUE;
if((err = e1000_up(adapter)))
goto err_up;
- return 0;
+ return E1000_SUCCESS;
err_up:
e1000_free_rx_resources(adapter);
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
- txdr->buffer_info = kmalloc(size, GFP_KERNEL);
+ txdr->buffer_info = vmalloc(size);
if(!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Transmit descriptor ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if(!txdr->desc) {
- kfree(txdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Transmit descriptor ring\n");
+ vfree(txdr->buffer_info);
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
E1000_TXD_CMD_IFCS;
- if(adapter->hw.report_tx_early == 1)
- adapter->txd_cmd |= E1000_TXD_CMD_RS;
- else
+ if(adapter->hw.mac_type < e1000_82543)
adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+ else
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
* need this to apply a workaround later in the send path. */
int size;
size = sizeof(struct e1000_buffer) * rxdr->count;
- rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
+ rxdr->buffer_info = vmalloc(size);
if(!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Receive descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
if(!rxdr->desc) {
- kfree(rxdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Receive descriptor ring\n");
+ vfree(rxdr->buffer_info);
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
else
rctl &= ~E1000_RCTL_SBP;
+ /* Setup buffer sizes */
rctl &= ~(E1000_RCTL_SZ_4096);
+ rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
switch (adapter->rx_buffer_len) {
case E1000_RXBUFFER_2048:
default:
rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
break;
case E1000_RXBUFFER_4096:
- rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ rctl |= E1000_RCTL_SZ_4096;
break;
case E1000_RXBUFFER_8192:
- rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ rctl |= E1000_RCTL_SZ_8192;
break;
case E1000_RXBUFFER_16384:
- rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ rctl |= E1000_RCTL_SZ_16384;
break;
}
uint32_t rctl;
uint32_t rxcsum;
- /* make sure receives are disabled while setting up the descriptors */
-
+ /* disable receives while setting up the descriptors */
rctl = E1000_READ_REG(&adapter->hw, RCTL);
E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
/* set the Receive Delay Timer Register */
-
E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
if(adapter->hw.mac_type >= e1000_82540) {
}
/* Setup the Base and Length of the Rx Descriptor Ring */
-
E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
}
/* Enable Receives */
-
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
e1000_clean_tx_ring(adapter);
- kfree(adapter->tx_ring.buffer_info);
+ vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
pci_free_consistent(pdev, adapter->tx_ring.size,
if(buffer_info->skb) {
pci_unmap_page(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
dev_kfree_skb(buffer_info->skb);
e1000_clean_rx_ring(adapter);
- kfree(rx_ring->buffer_info);
+ vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
if(buffer_info->skb) {
pci_unmap_single(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(buffer_info->skb);
-
buffer_info->skb = NULL;
}
}
uint32_t rctl;
uint32_t hash_value;
int i;
+ unsigned long flags;
/* Check for Promiscuous and All Multicast modes */
+ spin_lock_irqsave(&adapter->tx_lock, flags);
+
rctl = E1000_READ_REG(hw, RCTL);
if(netdev->flags & IFF_PROMISC) {
if(hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
+
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
-/* need to wait a few seconds after link up to get diagnostic information from the phy */
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
static void
e1000_update_phy_info(unsigned long data)
adapter->tpt_old = adapter->stats.tpt;
adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
adapter->colc_old = adapter->stats.colc;
-
+
adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
adapter->gorcl_old = adapter->stats.gorcl;
adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
#ifdef NETIF_F_TSO
struct e1000_context_desc *context_desc;
unsigned int i;
- uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+ uint32_t cmd_length = 0;
uint16_t ipcse, tucse, mss;
+ uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
if(skb_shinfo(skb)->tso_size) {
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
tucse = 0;
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
+ (skb->len - (hdr_len)));
+
i = adapter->tx_ring.next_to_use;
context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
- context_desc->cmd_and_length = cpu_to_le32(
- E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
- E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
- (skb->len - (hdr_len)));
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
{
struct e1000_context_desc *context_desc;
unsigned int i;
- uint8_t css, cso;
+ uint8_t css;
- if(skb->ip_summed == CHECKSUM_HW) {
+ if(likely(skb->ip_summed == CHECKSUM_HW)) {
css = skb->h.raw - skb->data;
- cso = (skb->h.raw + skb->csum) - skb->data;
i = adapter->tx_ring.next_to_use;
context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso = cso;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
- if(++i == adapter->tx_ring.count) i = 0;
+ if(unlikely(++i == adapter->tx_ring.count)) i = 0;
adapter->tx_ring.next_to_use = i;
return TRUE;
unsigned int f;
len -= skb->data_len;
-
i = tx_ring->next_to_use;
while(len) {
#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if(mss && !nr_frags && size == len && size > 8)
+ if(unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
#endif
/* Workaround for potential 82544 hang in PCI-X. Avoid
* terminating buffers within evenly-aligned dwords. */
- if(adapter->pcix_82544 &&
+ if(unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
- size > 4)
+ size > 4))
size -= 4;
buffer_info->length = size;
len -= size;
offset += size;
count++;
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
for(f = 0; f < nr_frags; f++) {
#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if(mss && f == (nr_frags-1) && size == len && size > 8)
+ if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
size -= 4;
#endif
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
* dwords. */
- if(adapter->pcix_82544 &&
+ if(unlikely(adapter->pcix_82544 &&
!((unsigned long)(frag->page+offset+size-1) & 4) &&
- size > 4)
+ size > 4))
size -= 4;
buffer_info->length = size;
len -= size;
offset += size;
count++;
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
}
+
i = (i == 0) ? tx_ring->count - 1 : i - 1;
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
-
+
return count;
}
uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;
- if(tx_flags & E1000_TX_FLAGS_TSO) {
+ if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
E1000_TXD_CMD_TSE;
txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
}
- if(tx_flags & E1000_TX_FLAGS_CSUM) {
+ if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
}
- if(tx_flags & E1000_TX_FLAGS_VLAN) {
+ if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
txd_lower |= E1000_TXD_CMD_VLE;
txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
}
tx_desc->lower.data =
cpu_to_le32(txd_lower | buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
return 0;
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned long flags;
unsigned int len = skb->len;
- int count = 0;
- unsigned int mss = 0;
+ unsigned long flags;
unsigned int nr_frags = 0;
+ unsigned int mss = 0;
+ int count = 0;
unsigned int f;
nr_frags = skb_shinfo(skb)->nr_frags;
len -= skb->data_len;
- if(skb->len <= 0) {
+
+ if(unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
}
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->tso_size;
- /* The controller does a simple calculation to
+ /* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
}
+
if((mss) || (skb->ip_summed == CHECKSUM_HW))
count++;
- count++; /*for sentinel desc*/
+ count++; /* for sentinel desc */
#else
if(skb->ip_summed == CHECKSUM_HW)
count++;
#endif
-
count += TXD_USE_COUNT(len, max_txd_pwr);
+
if(adapter->pcix_82544)
count++;
nr_frags = skb_shinfo(skb)->nr_frags;
for(f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
- max_txd_pwr);
+ max_txd_pwr);
if(adapter->pcix_82544)
count += nr_frags;
-
- spin_lock_irqsave(&adapter->tx_lock, flags);
- /* need: count + 2 desc gap to keep tail from touching
+
+ local_irq_save(flags);
+ if (!spin_trylock(&adapter->tx_lock)) {
+ /* Collision - tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+
+ /* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
- if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2 ) {
+ if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
- if(adapter->hw.mac_type == e1000_82547) {
- if(e1000_82547_fifo_workaround(adapter, skb)) {
+ if(unlikely(adapter->hw.mac_type == e1000_82547)) {
+ if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
- return 1;
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_BUSY;
}
}
- if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
tx_flags |= E1000_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
first = adapter->tx_ring.next_to_use;
- if(e1000_tso(adapter, skb))
+ if(likely(e1000_tso(adapter, skb)))
tx_flags |= E1000_TX_FLAGS_TSO;
- else if(e1000_tx_csum(adapter, skb))
+ else if(likely(e1000_tx_csum(adapter, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
- e1000_tx_queue(adapter,
- e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
+ e1000_tx_queue(adapter,
+ e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
tx_flags);
netdev->trans_start = jiffies;
- return 0;
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
}
/**
{
struct e1000_adapter *adapter = netdev->priv;
- netif_device_detach(netdev);
e1000_down(adapter);
e1000_up(adapter);
- netif_device_attach(netdev);
}
/**
}
if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
-
e1000_down(adapter);
e1000_up(adapter);
}
adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
- /* the rest of the counters are only modified here */
-
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
adapter->stats.mpc += E1000_READ_REG(hw, MPC);
adapter->stats.scc += E1000_READ_REG(hw, SCC);
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
- if(atomic_dec_and_test(&adapter->irq_sem)) {
+ if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev->priv;
struct e1000_hw *hw = &adapter->hw;
- uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
+ uint32_t icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
unsigned int i;
#endif
- if(!icr)
+ if(unlikely(!icr))
return IRQ_NONE; /* Not our interrupt */
- if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
mod_timer(&adapter->watchdog_timer, jiffies);
}
#ifdef CONFIG_E1000_NAPI
- if(netif_rx_schedule_prep(netdev)) {
+ if(likely(netif_rx_schedule_prep(netdev))) {
/* Disable interrupts and register for poll. The flush
of the posted write is intentionally left out.
}
#else
for(i = 0; i < E1000_MAX_INTR; i++)
- if(!e1000_clean_rx_irq(adapter) &
- !e1000_clean_tx_irq(adapter))
+ if(unlikely(!e1000_clean_rx_irq(adapter) &
+ !e1000_clean_tx_irq(adapter)))
break;
#endif
{
struct e1000_adapter *adapter = netdev->priv;
int work_to_do = min(*budget, netdev->quota);
+ int tx_cleaned;
int work_done = 0;
- e1000_clean_tx_irq(adapter);
+ tx_cleaned = e1000_clean_tx_irq(adapter);
e1000_clean_rx_irq(adapter, &work_done, work_to_do);
*budget -= work_done;
netdev->quota -= work_done;
- if(work_done < work_to_do || !netif_running(netdev)) {
+ /* if no Rx and Tx cleanup work was done, exit the polling mode */
+ if(!tx_cleaned || (work_done < work_to_do) ||
+ !netif_running(netdev)) {
netif_rx_complete(netdev);
e1000_irq_enable(adapter);
return 0;
return (work_done >= work_to_do);
}
-#endif
+#endif
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
unsigned int i, eop;
boolean_t cleaned = FALSE;
-
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-
for(cleaned = FALSE; !cleaned; ) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
- if(buffer_info->dma) {
-
+ if(likely(buffer_info->dma)) {
pci_unmap_page(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
-
buffer_info->dma = 0;
}
if(buffer_info->skb) {
-
dev_kfree_skb_any(buffer_info->skb);
-
buffer_info->skb = NULL;
}
tx_desc->upper.data = 0;
cleaned = (i == eop);
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
eop = tx_ring->buffer_info[i].next_to_watch;
spin_lock(&adapter->tx_lock);
- if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+ if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev)))
netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock);
}
/**
- * e1000_clean_rx_irq - Send received data up the network stack,
+ * e1000_clean_rx_irq - Send received data up the network stack
* @adapter: board private structure
**/
while(rx_desc->status & E1000_RXD_STAT_DD) {
buffer_info = &rx_ring->buffer_info[i];
-
#ifdef CONFIG_E1000_NAPI
if(*work_done >= work_to_do)
break;
-
(*work_done)++;
#endif
-
cleaned = TRUE;
pci_unmap_single(pdev,
skb = buffer_info->skb;
length = le16_to_cpu(rx_desc->length);
- if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
-
+ if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
/* All receives must fit into a single buffer */
-
- E1000_DBG("%s: Receive packet consumed multiple buffers\n",
- netdev->name);
-
+ E1000_DBG("%s: Receive packet consumed multiple"
+ " buffers\n", netdev->name);
dev_kfree_skb_irq(skb);
- rx_desc->status = 0;
- buffer_info->skb = NULL;
-
- if(++i == rx_ring->count) i = 0;
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- continue;
+ goto next_desc;
}
- if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
-
+ if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
last_byte = *(skb->data + length - 1);
-
if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
rx_desc->errors, length, last_byte)) {
-
spin_lock_irqsave(&adapter->stats_lock, flags);
-
e1000_tbi_adjust_stats(&adapter->hw,
&adapter->stats,
length, skb->data);
-
spin_unlock_irqrestore(&adapter->stats_lock,
flags);
length--;
} else {
-
dev_kfree_skb_irq(skb);
- rx_desc->status = 0;
- buffer_info->skb = NULL;
-
- if(++i == rx_ring->count) i = 0;
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- continue;
+ goto next_desc;
}
}
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
- if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
+ if(unlikely(adapter->vlgrp &&
+ (rx_desc->status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special &
- E1000_RXD_SPC_VLAN_MASK));
+ le16_to_cpu(rx_desc->special &
+ E1000_RXD_SPC_VLAN_MASK));
} else {
netif_receive_skb(skb);
}
#else /* CONFIG_E1000_NAPI */
- if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
+ if(unlikely(adapter->vlgrp &&
+ (rx_desc->status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special &
+ le16_to_cpu(rx_desc->special &
E1000_RXD_SPC_VLAN_MASK));
} else {
netif_rx(skb);
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
+next_desc:
rx_desc->status = 0;
buffer_info->skb = NULL;
-
- if(++i == rx_ring->count) i = 0;
+ if(unlikely(++i == rx_ring->count)) i = 0;
rx_desc = E1000_RX_DESC(*rx_ring, i);
}
buffer_info = &rx_ring->buffer_info[i];
while(!buffer_info->skb) {
- rx_desc = E1000_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
-
- if(!skb) {
+ if(unlikely(!skb)) {
/* Better luck next round */
break;
}
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma =
- pci_map_single(pdev,
- skb->data,
- adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
+ if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
E1000_WRITE_REG(&adapter->hw, RDT, i);
}
- if(++i == rx_ring->count) i = 0;
+ if(unlikely(++i == rx_ring->count)) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
return -EFAULT;
mii_reg = data->val_in;
if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
- data->val_in))
+ mii_reg))
return -EIO;
if (adapter->hw.phy_type == e1000_phy_m88) {
switch (data->reg_num) {
case PHY_CTRL:
- if(data->val_in & MII_CR_AUTO_NEG_EN) {
+ if(mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if(mii_reg & MII_CR_AUTO_NEG_EN) {
adapter->hw.autoneg = 1;
adapter->hw.autoneg_advertised = 0x2F;
} else {
- if (data->val_in & 0x40)
+ if (mii_reg & 0x40)
spddplx = SPEED_1000;
- else if (data->val_in & 0x2000)
+ else if (mii_reg & 0x2000)
spddplx = SPEED_100;
else
spddplx = SPEED_10;
- spddplx += (data->val_in & 0x100)
+ spddplx += (mii_reg & 0x100)
? FULL_DUPLEX :
HALF_DUPLEX;
retval = e1000_set_spd_dplx(adapter,
return -EIO;
break;
}
+ } else {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if(mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if(netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ break;
+ }
}
break;
default:
* @sk_buff: socket buffer with received data
**/
-static inline void
+static void
e1000_rx_checksum(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc,
struct sk_buff *skb)
{
/* 82543 or newer only */
- if((adapter->hw.mac_type < e1000_82543) ||
+ if(unlikely((adapter->hw.mac_type < e1000_82543) ||
/* Ignore Checksum bit is set */
(rx_desc->status & E1000_RXD_STAT_IXSM) ||
/* TCP Checksum has not been calculated */
- (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
+ (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_err++;
} else {
- /* TCP checksum is good */
+ /* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
{
struct e1000_adapter *adapter = hw->back;
- pci_set_mwi(adapter->pdev);
+ int ret;
+ ret = pci_set_mwi(adapter->pdev);
}
void
if(grp) {
/* enable VLAN tag insert/strip */
-
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
/* enable VLAN receive filtering */
-
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_CFIEN;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
} else {
/* disable VLAN tag insert/strip */
-
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl &= ~E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
/* disable VLAN filtering */
-
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl &= ~E1000_RCTL_VFE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
uint32_t vfta, index;
/* add VID to filter table */
-
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta |= (1 << (vid & 0x1F));
e1000_irq_enable(adapter);
- /* remove VID from filter table*/
-
+ /* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta &= ~(1 << (vid & 0x1F));
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
+ DPRINTK(PROBE, ERR,
+ "Unsupported Speed/Duplexity configuration\n");
return -EINVAL;
}
return 0;
}
}
+ pci_disable_device(pdev);
+
state = (state > 0) ? 3 : 0;
pci_set_power_state(pdev, state);
struct e1000_adapter *adapter = netdev->priv;
uint32_t manc;
+ pci_enable_device(pdev);
pci_set_power_state(pdev, 0);
pci_restore_state(pdev, adapter->pci_state);
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
-
-static void e1000_netpoll (struct net_device *dev)
+static void
+e1000_netpoll (struct net_device *netdev)
{
- struct e1000_adapter *adapter = dev->priv;
+ struct e1000_adapter *adapter = netdev->priv;
disable_irq(adapter->pdev->irq);
- e1000_intr (adapter->pdev->irq, dev, NULL);
+ e1000_intr(adapter->pdev->irq, netdev, NULL);
enable_irq(adapter->pdev->irq);
}
#endif