* notice is accompanying it.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.59.1"
-#define DRV_MODULE_RELDATE "August 25, 2006"
+#define DRV_MODULE_VERSION "3.65"
+#define DRV_MODULE_RELDATE "August 07, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP) \
- ((TP)->tx_pending - \
- (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
return err;
}
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
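+	/* Getting here with the reorder workaround already active, or while
+	 * using indirect (non-posted) mailbox writes, would indicate a
+	 * different problem entirely.
+	 */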
+ BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
+ tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+ printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
+ "mapped I/O cycles to the network device, attempting to "
+ "recover. Please report the problem to the driver maintainer "
+ "and include system chipset information.\n", tp->dev->name);
+
+ spin_lock(&tp->lock);
+ tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
+ spin_unlock(&tp->lock);
+}
+
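+/* Number of free descriptors in the tx ring.  The smp_mb() ensures
+ * tx_cons (advanced by tg3_tx()) is re-read after any preceding queue
+ * state update; see the barrier comment in tg3_tx().
+ */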
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+ smp_mb();
+ return (tp->tx_pending -
+ ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
/* Tigon3 never reports partial packet sends. So we do not
* need special logic to handle SKBs that have not had all
* of their frags sent yet, like SunGEM does.
while (sw_idx != hw_idx) {
struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
- int i;
- BUG_ON(skb == NULL);
+ int i, tx_bug = 0;
+
+ if (unlikely(skb == NULL)) {
+ tg3_tx_recover(tp);
+ return;
+ }
pci_unmap_single(tp->pdev,
pci_unmap_addr(ri, mapping),
skb_headlen(skb),
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- BUG_ON(sw_idx == hw_idx);
-
ri = &tp->tx_buffers[sw_idx];
- BUG_ON(ri->skb != NULL);
+ if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+ tx_bug = 1;
pci_unmap_page(tp->pdev,
pci_unmap_addr(ri, mapping),
}
dev_kfree_skb(skb);
+
+ if (unlikely(tx_bug)) {
+ tg3_tx_recover(tp);
+ return;
+ }
}
tp->tx_cons = sw_idx;
- if (unlikely(netif_queue_stopped(tp->dev))) {
- spin_lock(&tp->tx_lock);
+ /* Need to make the tx_cons update visible to tg3_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that tg3_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(tp->dev) &&
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+ netif_tx_lock(tp->dev);
if (netif_queue_stopped(tp->dev) &&
- (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
netif_wake_queue(tp->dev);
- spin_unlock(&tp->tx_lock);
+ netif_tx_unlock(tp->dev);
}
}
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = dev_alloc_skb(skb_size);
+ skb = netdev_alloc_skb(tp->dev, skb_size);
if (skb == NULL)
return -ENOMEM;
- skb->dev = tp->dev;
skb_reserve(skb, tp->rx_offset);
mapping = pci_map_single(tp->pdev, skb->data,
*/
static int tg3_rx(struct tg3 *tp, int budget)
{
- u32 work_mask;
+ u32 work_mask, rx_std_posted = 0;
u32 sw_idx = tp->rx_rcb_ptr;
u16 hw_idx;
int received;
mapping);
skb = tp->rx_std_buffers[desc_idx].skb;
post_ptr = &tp->rx_std_ptr;
+ rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
mapping);
tg3_recycle_rx(tp, opaque_key,
desc_idx, *post_ptr);
- copy_skb = dev_alloc_skb(len + 2);
+ copy_skb = netdev_alloc_skb(tp->dev, len + 2);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- copy_skb->dev = tp->dev;
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
next_pkt:
(*post_ptr)++;
+
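+ /* Some chips can only handle a limited producer-index increment
+ * per mailbox write (tp->rx_std_max_post), so post the std ring
+ * refills in chunks.
+ */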
+ if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+ u32 idx = *post_ptr % TG3_RX_RING_SIZE;
+
+ tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
+ TG3_64BIT_REG_LOW, idx);
+ work_mask &= ~RXD_OPAQUE_RING_STD;
+ rx_std_posted = 0;
+ }
next_pkt_nopost:
sw_idx++;
sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
/* run TX completion thread */
if (sblk->idx[0].tx_consumer != tp->tx_cons) {
tg3_tx(tp);
+ if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
+ netif_rx_complete(netdev);
+ schedule_work(&tp->reset_task);
+ return 0;
+ }
}
/* run RX thread, within the bounds set by NAPI.
if (irq_sync)
tg3_irq_quiesce(tp);
spin_lock_bh(&tp->lock);
- spin_lock(&tp->tx_lock);
}
static inline void tg3_full_unlock(struct tg3 *tp)
{
- spin_unlock(&tp->tx_lock);
spin_unlock_bh(&tp->lock);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
+ "aborting.\n", tp->dev->name);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ del_timer_sync(&tp->timer);
+ tp->irq_sync = 0;
+ netif_poll_enable(tp->dev);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
+ if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
+ tp->write32_tx_mbox = tg3_write32_tx_mbox;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+ tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
+ }
+
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_init_hw(tp, 1);
+ if (tg3_init_hw(tp, 1))
+ goto out;
tg3_netif_start(tp);
if (restart_timer)
mod_timer(&tp->timer, jiffies + 1);
+out:
tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
len = skb_headlen(skb);
- /* No BH disabling for tx_lock here. We are running in BH disabled
- * context and TX reclaim runs via tp->poll inside of a software
+ /* We are running in BH disabled context with netif_tx_lock
+ * and TX reclaim runs via tp->poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (!spin_trylock(&tp->tx_lock))
- return NETDEV_TX_LOCKED;
-
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name);
}
- spin_unlock(&tp->tx_lock);
return NETDEV_TX_BUSY;
}
goto out_unlock;
}
- tcp_opt_len = ((skb->h.th->doff - 5) * 4);
- ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
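+ /* The descriptor's mss field also encodes the header length
+ * (shifted left by 9); for IPv6 that length is taken from the
+ * linear header area instead of the IP header.
+ */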
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
+ else {
+ tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+ ip_tcp_len = (skb->nh.iph->ihl * 4) +
+ sizeof(struct tcphdr);
+
+ skb->nh.iph->check = 0;
+ skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
+ tcp_opt_len);
+ mss |= (ip_tcp_len + tcp_opt_len) << 9;
+ }
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
- skb->nh.iph->check = 0;
- skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-
skb->h.th->check = 0;
- mss |= (ip_tcp_len + tcp_opt_len) << 9;
}
else if (skb->ip_summed == CHECKSUM_HW)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+ if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
}
out_unlock:
mmiowb();
- spin_unlock(&tp->tx_lock);
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
+#if TG3_TSO_SUPPORT != 0
+static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
+
+/* Use GSO to work around a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+{
+ struct sk_buff *segs, *nskb;
+
+ /* Estimate the number of fragments in the worst case */
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+ netif_stop_queue(tp->dev);
+ return NETDEV_TX_BUSY;
+ }
+
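+ /* Segment the oversized TSO frame in software (TSO masked out of
+ * the feature flags) and transmit the pieces individually.
+ */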
+ segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+ if (unlikely(IS_ERR(segs)))
+ goto tg3_tso_bug_end;
+
+ do {
+ nskb = segs;
+ segs = segs->next;
+ nskb->next = NULL;
+ tg3_start_xmit_dma_bug(nskb, tp->dev);
+ } while (segs);
+
+tg3_tso_bug_end:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+#endif
+
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
* support TG3_FLG2_HW_TSO_1 or firmware TSO only.
*/
len = skb_headlen(skb);
- /* No BH disabling for tx_lock here. We are running in BH disabled
- * context and TX reclaim runs via tp->poll inside of a software
+ /* We are running in BH disabled context with netif_tx_lock
+ * and TX reclaim runs via tp->poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (!spin_trylock(&tp->tx_lock))
- return NETDEV_TX_LOCKED;
-
- if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name);
}
- spin_unlock(&tp->tx_lock);
return NETDEV_TX_BUSY;
}
mss = 0;
if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
(mss = skb_shinfo(skb)->gso_size) != 0) {
- int tcp_opt_len, ip_tcp_len;
+ int tcp_opt_len, ip_tcp_len, hdr_len;
if (skb_header_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
tcp_opt_len = ((skb->h.th->doff - 5) * 4);
ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+ hdr_len = ip_tcp_len + tcp_opt_len;
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+ (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
+ return (tg3_tso_bug(tp, skb));
+
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
skb->nh.iph->check = 0;
- skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
+ skb->nh.iph->tot_len = htons(mss + hdr_len);
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
skb->h.th->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
- if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+ if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
netif_wake_queue(tp->dev);
}
out_unlock:
mmiowb();
- spin_unlock(&tp->tx_lock);
dev->trans_start = jiffies;
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
+ int err;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
tg3_set_mtu(dev, tp, new_mtu);
- tg3_init_hw(tp, 0);
+ err = tg3_restart_hw(tp, 0);
- tg3_netif_start(tp);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
- return 0;
+ return err;
}
/* Free up pending packets in all rx/tx rings.
* end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
*/
-static void tg3_init_rings(struct tg3 *tp)
+static int tg3_init_rings(struct tg3 *tp)
{
u32 i;
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
- -1, i) < 0)
+ if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX standard ring, "
+ "only %d out of %d buffers were allocated "
+ "successfully.\n",
+ tp->dev->name, i, tp->rx_pending);
+ if (i == 0)
+ return -ENOMEM;
+ tp->rx_pending = i;
break;
+ }
}
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
- -1, i) < 0)
+ -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX jumbo ring, "
+ "only %d out of %d buffers were "
+ "allocated successfully.\n",
+ tp->dev->name, i, tp->rx_jumbo_pending);
+ if (i == 0) {
+ tg3_free_rings(tp);
+ return -ENOMEM;
+ }
+ tp->rx_jumbo_pending = i;
break;
+ }
}
}
+ return 0;
}
/*
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
+ int err = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 0);
-
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 0);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
} else {
spin_lock_bh(&tp->lock);
spin_unlock_bh(&tp->lock);
}
- return 0;
+ return err;
}
/* tp->lock is held. */
* can only do this after the hardware has been
* successfully reset.
*/
- tg3_init_rings(tp);
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
}
/* Setup replenish threshold. */
- tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
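+ /* Never program a zero threshold, and cap it at the largest
+ * producer-index increment we will post at once.
+ */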
+ val = tp->rx_pending / 8;
+ if (val == 0)
+ val = 1;
+ else if (val > tp->rx_std_max_post)
+ val = tp->rx_std_max_post;
+
+ tw32(RCVBDI_STD_THRESH, val);
/* Initialize TG3_BDINFO's at:
* RCVDBDI_STD_BD: standard eth size rx ring
#endif
/* Receive/send statistics. */
- if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
- (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+ if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+ val = tr32(RCVLPC_STATS_ENABLE);
+ val &= ~RCVLPC_STATSENAB_DACK_FIX;
+ tw32(RCVLPC_STATS_ENABLE, val);
+ } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+ (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
val = tr32(RCVLPC_STATS_ENABLE);
val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
tw32(RCVLPC_STATS_ENABLE, val);
fn = tg3_msi;
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
fn = tg3_msi_1shot;
- flags = SA_SAMPLE_RANDOM;
+ flags = IRQF_SAMPLE_RANDOM;
} else {
fn = tg3_interrupt;
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
fn = tg3_interrupt_tagged;
- flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
+ flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
}
return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
}
free_irq(tp->pdev->irq, dev);
err = request_irq(tp->pdev->irq, tg3_test_isr,
- SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
if (err)
return err;
return -EINVAL;
return 0;
}
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
+ if (value)
+ dev->features |= NETIF_F_TSO6;
+ else
+ dev->features &= ~NETIF_F_TSO6;
+ }
return ethtool_op_set_tso(dev, value);
}
#endif
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if (netif_running(dev)) {
tg3_netif_stop(tp);
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static u32 tg3_get_rx_csum(struct net_device *dev)
err = -EIO;
tx_len = 1514;
- skb = dev_alloc_skb(tx_len);
+ skb = netdev_alloc_skb(tp->dev, tx_len);
if (!skb)
return -ENOMEM;
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
- tg3_reset_hw(tp, 1);
+ err = tg3_reset_hw(tp, 1);
+ if (err)
+ return TG3_LOOPBACK_FAILED;
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ if (!tg3_restart_hw(tp, 1))
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
{
struct tg3 *tp = netdev_priv(dev);
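+ /* Quiesce NAPI and the tx queue while the VLAN group pointer
+ * changes.
+ */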
+ if (netif_running(dev))
+ tg3_netif_stop(tp);
+
tg3_full_lock(tp, 0);
tp->vlgrp = grp;
__tg3_set_rx_mode(dev);
tg3_full_unlock(tp);
+
+ if (netif_running(dev))
+ tg3_netif_start(tp);
}
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct tg3 *tp = netdev_priv(dev);
+ if (netif_running(dev))
+ tg3_netif_stop(tp);
+
tg3_full_lock(tp, 0);
if (tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL;
tg3_full_unlock(tp);
+
+ if (netif_running(dev))
+ tg3_netif_start(tp);
}
#endif
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
- } else
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
+ } else {
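+ /* HW_TSO_1 parts may hit the long-header TSO bug worked around in
+ * tg3_tso_bug(); 5750 C2 and newer revisions are not affected, so
+ * the flag is cleared for them below.
+ */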
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
+ TG3_FLG2_HW_TSO_1_BUG;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5750 &&
+ tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
+ }
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
tp->rx_offset = 0;
+ tp->rx_std_max_post = TG3_RX_RING_SIZE;
+
+ /* Increment the rx prod index on the rx std ring by at most
+ * 8 for these chips to work around hw errata.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ tp->rx_std_max_post = 8;
+
/* By default, disable wake-on-lan. User can change this
* using ETHTOOL_SWOL.
*/
struct pcidev_cookie *pcp = pdev->sysdata;
if (pcp != NULL) {
- int node = pcp->prom_node;
+ unsigned char *addr;
+ int len;
- if (prom_getproplen(node, "local-mac-address") == 6) {
- prom_getproperty(node, "local-mac-address",
- dev->dev_addr, 6);
+ addr = of_get_property(pcp->prom_node, "local-mac-address",
+ &len);
+ if (addr && len == 6) {
+ memcpy(dev->dev_addr, addr, 6);
memcpy(dev->perm_addr, dev->dev_addr, 6);
return 0;
}
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = tg3_vlan_rx_register;
tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
spin_lock_init(&tp->lock);
- spin_lock_init(&tp->tx_lock);
spin_lock_init(&tp->indirect_lock);
INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
- /* TSO is on by default on chips that support HW_TSO_2.
- * Some HW_TSO_1 capable chips have bugs that can lead to
- * tx timeouts in some cases when TSO is enabled.
+ /* TSO is on by default on chips that support hardware TSO.
* Firmware TSO on older chips gives lower performance, so it
* is off by default, but can be enabled using ethtool.
*/
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
- tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2))
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
dev->features |= NETIF_F_TSO;
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+ dev->features |= NETIF_F_TSO6;
+ }
#endif
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ if (tg3_restart_hw(tp, 1))
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
netif_device_attach(dev);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
}
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (err)
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static struct pci_driver tg3_driver = {