#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.5"
+#define DRV_VERSION "1.3"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
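+/* All interrupt sources belonging to one port, indexed by port number */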
+static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
static int skge_get_regs_len(struct net_device *dev)
{
skge->net_stats.rx_bytes = data[1];
skge->net_stats.tx_packets = data[2] + data[4] + data[6];
skge->net_stats.rx_packets = data[3] + data[5] + data[7];
- skge->net_stats.multicast = data[3] + data[5];
+ skge->net_stats.multicast = data[5] + data[7];
skge->net_stats.collisions = data[10];
skge->net_stats.tx_aborted_errors = data[12];
int err;
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
- p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
+ p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
return -EINVAL;
skge->rx_ring.count = p->rx_pending;
* Allocate ring elements and chain them together
* One-to-one association of board descriptors with ring elements
*/
-static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
{
struct skge_tx_desc *d;
struct skge_element *e;
int i;
- ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL);
+ ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
if (!ring->start)
return -ENOMEM;
for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
e->desc = d;
+ e->skb = NULL;
if (i == ring->count - 1) {
e->next = ring->start;
d->next_offset = base;
* Note: DMA address is not changed by chip.
* MTU not changed while receiver active.
*/
-static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
+static void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
struct skge_rx_desc *rd = e->desc;
do {
struct sk_buff *skb;
- skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+ skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
if (!skb)
return -ENOMEM;
LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
netif_carrier_on(skge->netdev);
- netif_wake_queue(skge->netdev);
+ if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+ netif_wake_queue(skge->netdev);
if (netif_msg_link(skge))
printk(KERN_INFO PFX
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
if (dev->mtu > RX_BUF_SIZE)
- skge->rx_buf_size = dev->mtu + ETH_HLEN;
+ skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
else
skge->rx_buf_size = RX_BUF_SIZE;
if (!skge->mem)
return -ENOMEM;
- BUG_ON(skge->dma & 7);
-
- if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
- printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
- err = -EINVAL;
- goto free_pci_mem;
- }
-
memset(skge->mem, 0, skge->mem_size);
- err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
- if (err)
+ if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
goto free_pci_mem;
err = skge_rx_fill(skge);
if (err)
goto free_rx_ring;
- err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
- skge->dma + rx_size);
- if (err)
+ if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+ skge->dma + rx_size)))
goto free_rx_ring;
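+	/* leave one descriptor unused so a full ring is never mistaken for an empty one */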
+ skge->tx_avail = skge->tx_ring.count - 1;
+
+ /* Enable IRQ from port */
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= portirqmask[port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
+
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
if (hw->chip_id == CHIP_ID_GENESIS)
else
yukon_stop(skge);
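+	/* mask this port's interrupt sources before stopping the transmitter */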
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask &= ~portirqmask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
+
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
return 0;
}
-static inline int skge_avail(const struct skge_ring *ring)
-{
- return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
- + (ring->to_clean - ring->to_use) - 1;
-}
-
static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
int i;
u32 control, len;
u64 map;
+ unsigned long flags;
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
return NETDEV_TX_OK;
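+	/* skge_tx_intr() takes tx_lock in hard-irq context, so interrupts
+	 * must stay off for as long as we hold the lock */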
+ local_irq_save(flags);
if (!spin_trylock(&skge->tx_lock)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
+ /* Collision - tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
- if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
+	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags + 1)) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
dev->name);
}
- spin_unlock(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
return NETDEV_TX_BUSY;
}
dev->name, e - ring->start, skb->len);
ring->to_use = e->next;
- if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
+ skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
+ if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
pr_debug("%s: transmit queue full\n", dev->name);
netif_stop_queue(dev);
}
- mmiowb();
- spin_unlock(&skge->tx_lock);
-
dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
return NETDEV_TX_OK;
}
-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
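+/* Release one transmit ring element: unmap its DMA buffer and, if the
+ * element holds the head of a frame rather than a fragment, free the skb. */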
+static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
{
- struct pci_dev *pdev = skge->hw->pdev;
- struct skge_element *e;
-
- for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
- struct sk_buff *skb = e->skb;
- int i;
-
+ /* This ring element can be skb or fragment */
+ if (e->skb) {
+ pci_unmap_single(hw->pdev,
+ pci_unmap_addr(e, mapaddr),
+ pci_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(e->skb);
e->skb = NULL;
- pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
- skb_headlen(skb), PCI_DMA_TODEVICE);
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- e = e->next;
- pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
- skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- }
-
- dev_kfree_skb(skb);
+ } else {
+ pci_unmap_page(hw->pdev,
+ pci_unmap_addr(e, mapaddr),
+ pci_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
}
- skge->tx_ring.to_clean = e;
}
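+/* Walk the transmit ring and free everything still queued. */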
static void skge_tx_clean(struct skge_port *skge)
{
+ struct skge_ring *ring = &skge->tx_ring;
+ struct skge_element *e;
+ unsigned long flags;
- spin_lock_bh(&skge->tx_lock);
- skge_tx_complete(skge, skge->tx_ring.to_use);
- netif_wake_queue(skge->netdev);
- spin_unlock_bh(&skge->tx_lock);
+ spin_lock_irqsave(&skge->tx_lock, flags);
+ for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+ ++skge->tx_avail;
+ skge_tx_free(skge->hw, e);
+ }
+ ring->to_clean = e;
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
}
static void skge_tx_timeout(struct net_device *dev)
goto error;
if (len < RX_COPY_THRESHOLD) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
+ skb = dev_alloc_skb(len + 2);
if (!skb)
goto resubmit;
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
- nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+ nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
if (!nskb)
goto resubmit;
- skb_reserve(nskb, NET_IP_ALIGN);
pci_unmap_single(skge->hw->pdev,
pci_unmap_addr(e, mapaddr),
pci_unmap_len(e, maplen),
return NULL;
}
-static void skge_tx_done(struct skge_port *skge)
-{
- struct skge_ring *ring = &skge->tx_ring;
- struct skge_element *e, *last;
-
- spin_lock(&skge->tx_lock);
- last = ring->to_clean;
- for (e = ring->to_clean; e != ring->to_use; e = e->next) {
- struct skge_tx_desc *td = e->desc;
-
- if (td->control & BMU_OWN)
- break;
-
- if (td->control & BMU_EOF) {
- last = e->next;
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - ring->start);
- }
- }
-
- skge_tx_complete(skge, last);
-
- skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
- if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
- netif_wake_queue(skge->netdev);
-
- spin_unlock(&skge->tx_lock);
-}
static int skge_poll(struct net_device *dev, int *budget)
{
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
- int to_do = min(dev->quota, *budget);
- int work_done = 0;
-
- skge_tx_done(skge);
+ unsigned int to_do = min(dev->quota, *budget);
+ unsigned int work_done = 0;
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
if (control & BMU_OWN)
break;
- skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+ skb = skge_rx_get(skge, e, control, rd->status,
+ le16_to_cpu(rd->csum2));
if (likely(skb)) {
dev->last_rx = jiffies;
netif_receive_skb(skb);
++work_done;
- }
+ } else
+ skge_rx_reuse(e, skge->rx_buf_size);
}
ring->to_clean = e;
if (work_done >= to_do)
return 1; /* not done */
- netif_rx_complete(dev);
- mmiowb();
-
- hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+ spin_lock_irq(&hw->hw_lock);
+ __netif_rx_complete(dev);
+ hw->intr_mask |= portirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
return 0;
}
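+/* Transmit-complete interrupt: reclaim descriptors the hardware has
+ * finished with and wake the queue once enough slots are free. */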
+static inline void skge_tx_intr(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ struct skge_ring *ring = &skge->tx_ring;
+ struct skge_element *e;
+
+ spin_lock(&skge->tx_lock);
+ for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+ u32 control;
+
+ rmb();
+ control = td->control;
+ if (control & BMU_OWN)
+ break;
+
+ if (unlikely(netif_msg_tx_done(skge)))
+ printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
+ dev->name, e - ring->start, td->status);
+
+ skge_tx_free(hw, e);
+ e->skb = NULL;
+ ++skge->tx_avail;
+ }
+ ring->to_clean = e;
+ skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+ netif_wake_queue(dev);
+
+ spin_unlock(&skge->tx_lock);
+}
+
/* Parity errors seem to happen when Genesis is connected to a switch
* with no other ports present. Heartbeat error??
*/
? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
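+/* Clear any latched PCI error bits; on these chips the PCI status
+ * register is only writable while TST_CFG_WRITE_ON is set. */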
+static void skge_pci_clear(struct skge_hw *hw)
+{
+ u16 status;
+
+ pci_read_config_word(hw->pdev, PCI_STATUS, &status);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_write_config_word(hw->pdev, PCI_STATUS,
+ status | PCI_STATUS_ERROR_BITS);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+}
+
static void skge_mac_intr(struct skge_hw *hw, int port)
{
if (hw->chip_id == CHIP_ID_GENESIS)
if (hwstatus & IS_M2_PAR_ERR)
skge_mac_parity(hw, 1);
- if (hwstatus & IS_R1_PAR_ERR) {
- printk(KERN_ERR PFX "%s: receive queue parity error\n",
- hw->dev[0]->name);
+ if (hwstatus & IS_R1_PAR_ERR)
skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
- }
- if (hwstatus & IS_R2_PAR_ERR) {
- printk(KERN_ERR PFX "%s: receive queue parity error\n",
- hw->dev[1]->name);
+ if (hwstatus & IS_R2_PAR_ERR)
skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
- }
if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
- u16 pci_status, pci_cmd;
+ printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
+ hwstatus);
- pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
- pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
-
- printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
- pci_name(hw->pdev), pci_cmd, pci_status);
-
- /* Write the error bits back to clear them. */
- pci_status &= PCI_STATUS_ERROR_BITS;
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_word(hw->pdev, PCI_COMMAND,
- pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
- pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ skge_pci_clear(hw);
/* if error still set then just ignore it */
hwstatus = skge_read32(hw, B0_HWE_ISRC);
if (hwstatus & IS_IRQ_STAT) {
- printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
+ pr_debug("IRQ status %x: still set ignoring hardware errors\n",
+ hwstatus);
hw->intr_mask &= ~IS_HW_ERR;
}
}
int port;
spin_lock(&hw->phy_lock);
- for (port = 0; port < hw->ports; port++) {
+ for (port = 0; port < 2; port++) {
struct net_device *dev = hw->dev[port];
- struct skge_port *skge = netdev_priv(dev);
- if (netif_running(dev)) {
+ if (dev && netif_running(dev)) {
+ struct skge_port *skge = netdev_priv(dev);
+
if (hw->chip_id != CHIP_ID_GENESIS)
yukon_phy_intr(skge);
else
}
spin_unlock(&hw->phy_lock);
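+	/* re-enable the external (PHY) interrupt source under hw_lock */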
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct skge_hw *hw = dev_id;
- u32 status;
+ u32 status = skge_read32(hw, B0_SP_ISRC);
- /* Reading this register masks IRQ */
- status = skge_read32(hw, B0_SP_ISRC);
- if (status == 0)
+ if (status == 0 || status == ~0) /* hotplug or shared irq */
return IRQ_NONE;
- if (status & IS_EXT_REG) {
- hw->intr_mask &= ~IS_EXT_REG;
- tasklet_schedule(&hw->ext_tasklet);
- }
-
- if (status & (IS_R1_F|IS_XA1_F)) {
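+	/* hw_lock serializes intr_mask updates with skge_poll() and skge_extirq() */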
+ spin_lock(&hw->hw_lock);
+ if (status & IS_R1_F) {
skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
+ hw->intr_mask &= ~IS_R1_F;
netif_rx_schedule(hw->dev[0]);
}
- if (status & (IS_R2_F|IS_XA2_F)) {
+ if (status & IS_R2_F) {
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
+ hw->intr_mask &= ~IS_R2_F;
netif_rx_schedule(hw->dev[1]);
}
- if (likely((status & hw->intr_mask) == 0))
- return IRQ_HANDLED;
+ if (status & IS_XA1_F)
+ skge_tx_intr(hw->dev[0]);
+
+ if (status & IS_XA2_F)
+ skge_tx_intr(hw->dev[1]);
if (status & IS_PA_TO_RX1) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
if (status & IS_HW_ERR)
skge_error_irq(hw);
+ if (status & IS_EXT_REG) {
+ hw->intr_mask &= ~IS_EXT_REG;
+ tasklet_schedule(&hw->ext_tasklet);
+ }
+
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
static int skge_reset(struct skge_hw *hw)
{
u32 reg;
- u16 ctst, pci_status;
+ u16 ctst;
u8 t8, mac_cfg, pmd_type, phy_type;
int i;
skge_write8(hw, B0_CTST, CS_RST_CLR);
/* clear PCI errors, if any */
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- skge_write8(hw, B2_TST_CTRL2, 0);
+ skge_pci_clear(hw);
- pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
- pci_write_config_word(hw->pdev, PCI_STATUS,
- pci_status | PCI_STATUS_ERROR_BITS);
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
skge_write8(hw, B0_CTST, CS_MRST_CLR);
/* restore CLK_RUN bits (for Yukon-Lite) */
else
hw->ram_size = t8 * 4096;
- hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
- if (hw->ports > 1)
- hw->intr_mask |= IS_PORT_2;
-
+ hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_init(hw);
else {
struct skge_hw *hw;
int err, using_dac = 0;
- err = pci_enable_device(pdev);
- if (err) {
+ if ((err = pci_enable_device(pdev))) {
printk(KERN_ERR PFX "%s cannot enable PCI device\n",
pci_name(pdev));
goto err_out;
}
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
+ if ((err = pci_request_regions(pdev, DRV_NAME))) {
printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
pci_name(pdev));
goto err_out_disable_pdev;
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
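+	/* attempt 64-bit DMA only when dma_addr_t is wide enough;
+	 * otherwise fall back to a 32-bit mask */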
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+ !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
- using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
- }
-
- if (err) {
- printk(KERN_ERR PFX "%s no usable DMA configuration\n",
- pci_name(pdev));
- goto err_out_free_regions;
+ if (err < 0) {
+ printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+ "for consistent allocations\n", pci_name(pdev));
+ goto err_out_free_regions;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+ pci_name(pdev));
+ goto err_out_free_regions;
+ }
}
#ifdef __BIG_ENDIAN
hw->pdev = pdev;
spin_lock_init(&hw->phy_lock);
+ spin_lock_init(&hw->hw_lock);
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
goto err_out_free_hw;
}
- err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
- if (err) {
+ if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
goto err_out_iounmap;
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
goto err_out_led_off;
- err = register_netdev(dev);
- if (err) {
+ if ((err = register_netdev(dev))) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
pci_name(pdev));
goto err_out_free_netdev;
skge_write32(hw, B0_IMSK, 0);
skge_write16(hw, B0_LED, LED_STAT_OFF);
+ skge_pci_clear(hw);
skge_write8(hw, B0_CTST, CS_RST_SET);
tasklet_kill(&hw->ext_tasklet);