#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/version.h>
+#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#define DRV_MODULE_NAME "b44"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.94"
-#define DRV_MODULE_RELDATE "May 4, 2004"
+#define DRV_MODULE_VERSION "0.97"
+#define DRV_MODULE_RELDATE "Nov 30, 2005"
#define B44_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
B44_TX_RING_SIZE)
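+/* The chip is unable to do PCI DMA to/from anything above 1GB, so the
+ * DMA mask is limited to 30 bits and higher buffers are bounced below. */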
+#define B44_DMA_MASK 0x3fffffff
#define TX_RING_GAP(BP) \
(B44_TX_RING_SIZE - (BP)->tx_pending)
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
#define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
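+/* Upper bound on an outgoing frame; sizes the TX bounce buffers in b44_start_xmit(). */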
+#define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
+MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_PARM(b44_debug, "i");
-MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
+MODULE_VERSION(DRV_MODULE_VERSION);
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
+module_param(b44_debug, int, 0);
+MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
static struct pci_device_id b44_pci_tbl[] = {
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *);
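+/* Alignment mask and sync size used when flushing single DMA descriptors;
+ * both are derived from dma_get_cache_alignment() in b44_init(). */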
+static int dma_desc_align_mask;
+static int dma_desc_sync_size;
+
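+/* Names for the "ethtool -S" statistics, expanded from B44_STAT_REG_DECLARE
+ * so they stay in step with the counters copied out in b44_get_ethtool_stats(). */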
+static const char b44_gstrings[][ETH_GSTRING_LEN] = {
+#define _B44(x...) # x,
+B44_STAT_REG_DECLARE
+#undef _B44
+};
+
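+/* Sync a single descriptor in a ring that was streaming-mapped with
+ * dma_map_single() (the RX/TX ring-hack fallback) rather than allocated
+ * with pci_alloc_consistent(). */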
+static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(&pdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+}
+
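+/* MMIO register accessors; they now take the b44 context explicitly. */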
+static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+{
+ return readl(bp->regs + reg);
+}
+
+static inline void bw32(const struct b44 *bp,
+ unsigned long reg, unsigned long val)
+{
+ writel(val, bp->regs + reg);
+}
+
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
u32 bit, unsigned long timeout, const int clear)
{
unsigned long i;
for (i = 0; i < timeout; i++) {
- u32 val = br32(reg);
+ u32 val = br32(bp, reg);
if (clear && !(val & bit))
break;
* interrupts disabled.
*/
-#define SBID_SDRAM 0
-#define SBID_PCI_MEM 1
-#define SBID_PCI_CFG 2
-#define SBID_PCI_DMA 3
-#define SBID_SDRAM_SWAPPED 4
-#define SBID_ENUM 5
-#define SBID_REG_SDRAM 6
-#define SBID_REG_ILINE20 7
-#define SBID_REG_EMAC 8
-#define SBID_REG_CODEC 9
-#define SBID_REG_USB 10
-#define SBID_REG_PCI 11
-#define SBID_REG_MIPS 12
-#define SBID_REG_EXTIF 13
-#define SBID_EXTIF 14
-#define SBID_EJTAG 15
-#define SBID_MAX 16
-
-static u32 ssb_get_addr(struct b44 *bp, u32 id, u32 instance)
-{
- switch (id) {
- case SBID_PCI_DMA:
- return 0x40000000;
- case SBID_ENUM:
- return 0x18000000;
- case SBID_REG_EMAC:
- return 0x18000000;
- case SBID_REG_CODEC:
- return 0x18001000;
- case SBID_REG_PCI:
- return 0x18002000;
- default:
- return 0;
- };
-}
+#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
+#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
static u32 ssb_get_core_rev(struct b44 *bp)
{
- return (br32(B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
+ return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
u32 bar_orig, pci_rev, val;
pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
- pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
- ssb_get_addr(bp, SBID_REG_PCI, 0));
+ pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
pci_rev = ssb_get_core_rev(bp);
- val = br32(B44_SBINTVEC);
+ val = br32(bp, B44_SBINTVEC);
val |= cores;
- bw32(B44_SBINTVEC, val);
+ bw32(bp, B44_SBINTVEC, val);
- val = br32(SSB_PCI_TRANS_2);
+ val = br32(bp, SSB_PCI_TRANS_2);
val |= SSB_PCI_PREF | SSB_PCI_BURST;
- bw32(SSB_PCI_TRANS_2, val);
+ bw32(bp, SSB_PCI_TRANS_2, val);
pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
static void ssb_core_disable(struct b44 *bp)
{
- if (br32(B44_SBTMSLOW) & SBTMSLOW_RESET)
+ if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
return;
- bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
- bw32(B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
SBTMSLOW_REJECT | SBTMSLOW_RESET));
- br32(B44_SBTMSLOW);
+ br32(bp, B44_SBTMSLOW);
udelay(1);
- bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
- br32(B44_SBTMSLOW);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
+ br32(bp, B44_SBTMSLOW);
udelay(1);
}
u32 val;
ssb_core_disable(bp);
- bw32(B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
- br32(B44_SBTMSLOW);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
+ br32(bp, B44_SBTMSLOW);
udelay(1);
/* Clear SERR if set, this is a hw bug workaround. */
- if (br32(B44_SBTMSHIGH) & SBTMSHIGH_SERR)
- bw32(B44_SBTMSHIGH, 0);
+ if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
+ bw32(bp, B44_SBTMSHIGH, 0);
- val = br32(B44_SBIMSTATE);
+ val = br32(bp, B44_SBIMSTATE);
if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
- bw32(B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
+ bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
- bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
- br32(B44_SBTMSLOW);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
+ br32(bp, B44_SBTMSLOW);
udelay(1);
- bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK));
- br32(B44_SBTMSLOW);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
+ br32(bp, B44_SBTMSLOW);
udelay(1);
}
static int ssb_core_unit(struct b44 *bp)
{
#if 0
- u32 val = br32(B44_SBADMATCH0);
+ u32 val = br32(bp, B44_SBADMATCH0);
u32 base;
type = val & SBADMATCH0_TYPE_MASK;
static int ssb_is_core_up(struct b44 *bp)
{
- return ((br32(B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
+ return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
== SBTMSLOW_CLOCK);
}
val |= ((u32) data[3]) << 16;
val |= ((u32) data[4]) << 8;
val |= ((u32) data[5]) << 0;
- bw32(B44_CAM_DATA_LO, val);
+ bw32(bp, B44_CAM_DATA_LO, val);
val = (CAM_DATA_HI_VALID |
(((u32) data[0]) << 8) |
(((u32) data[1]) << 0));
- bw32(B44_CAM_DATA_HI, val);
- bw32(B44_CAM_CTRL, (CAM_CTRL_WRITE |
+ bw32(bp, B44_CAM_DATA_HI, val);
+ bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
(index << CAM_CTRL_INDEX_SHIFT)));
b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
static inline void __b44_disable_ints(struct b44 *bp)
{
- bw32(B44_IMASK, 0);
+ bw32(bp, B44_IMASK, 0);
}
static void b44_disable_ints(struct b44 *bp)
__b44_disable_ints(bp);
/* Flush posted writes. */
- br32(B44_IMASK);
+ br32(bp, B44_IMASK);
}
static void b44_enable_ints(struct b44 *bp)
{
- bw32(B44_IMASK, bp->imask);
+ bw32(bp, B44_IMASK, bp->imask);
}
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
int err;
- bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
- bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
+ bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+ bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
(MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
(bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
(reg << MDIO_DATA_RA_SHIFT) |
(MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
- *val = br32(B44_MDIO_DATA) & MDIO_DATA_DATA;
+ *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
return err;
}
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
- bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
- bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
+ bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+ bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
(MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
(bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
(reg << MDIO_DATA_RA_SHIFT) |
bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
bp->flags |= pause_flags;
- val = br32(B44_RXCONFIG);
+ val = br32(bp, B44_RXCONFIG);
if (pause_flags & B44_FLAG_RX_PAUSE)
val |= RXCONFIG_FLOW;
else
val &= ~RXCONFIG_FLOW;
- bw32(B44_RXCONFIG, val);
+ bw32(bp, B44_RXCONFIG, val);
- val = br32(B44_MAC_FLOW);
+ val = br32(bp, B44_MAC_FLOW);
if (pause_flags & B44_FLAG_TX_PAUSE)
val |= (MAC_FLOW_PAUSE_ENAB |
(0xc0 & MAC_FLOW_RX_HI_WATER));
else
val &= ~MAC_FLOW_PAUSE_ENAB;
- bw32(B44_MAC_FLOW, val);
+ bw32(bp, B44_MAC_FLOW, val);
}
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
val = &bp->hw_stats.tx_good_octets;
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
- *val++ += br32(reg);
+ *val++ += br32(bp, reg);
}
- val = &bp->hw_stats.rx_good_octets;
+
+ /* Pad */
+ reg += 8*4UL;
+
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
- *val++ += br32(reg);
+ *val++ += br32(bp, reg);
}
}
if (!netif_carrier_ok(bp->dev) &&
(bmsr & BMSR_LSTATUS)) {
- u32 val = br32(B44_TX_CTRL);
+ u32 val = br32(bp, B44_TX_CTRL);
u32 local_adv, remote_adv;
if (bp->flags & B44_FLAG_FULL_DUPLEX)
val |= TX_CTRL_DUPLEX;
else
val &= ~TX_CTRL_DUPLEX;
- bw32(B44_TX_CTRL, val);
+ bw32(bp, B44_TX_CTRL, val);
if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
!b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
{
u32 cur, cons;
- cur = br32(B44_DMATX_STAT) & DMATX_STAT_CDMASK;
+ cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
cur /= sizeof(struct dma_desc);
/* XXX needs updating when NETIF_F_SG is supported */
TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
netif_wake_queue(bp->dev);
- bw32(B44_GPTIMER, 0);
+ bw32(bp, B44_GPTIMER, 0);
}
/* Works like this. This chip writes a 'struct rx_header' 30 bytes
if (skb == NULL)
return -ENOMEM;
- skb->dev = bp->dev;
mapping = pci_map_single(bp->pdev, skb->data,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
+
+ /* Hardware bug work-around: the chip is unable to do PCI DMA
+ to/from anything above 1GB :-( */
+ if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ /* Sigh... */
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+ mapping = pci_map_single(bp->pdev, skb->data,
+ RX_PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+
+ skb->dev = bp->dev;
skb_reserve(skb, bp->rx_offset);
rh = (struct rx_header *)
dp->ctrl = cpu_to_le32(ctrl);
dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+ dest_idx * sizeof(*dp),
+ DMA_BIDIRECTIONAL);
+
return RX_PKT_BUF_SZ;
}
pci_unmap_addr_set(dest_map, mapping,
pci_unmap_addr(src_map, mapping));
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
+ src_idx * sizeof(*src_desc),
+ DMA_BIDIRECTIONAL);
+
ctrl = src_desc->ctrl;
if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= cpu_to_le32(DESC_CTRL_EOT);
dest_desc->ctrl = ctrl;
dest_desc->addr = src_desc->addr;
+
src_map->skb = NULL;
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+ dest_idx * sizeof(*dest_desc),
+ DMA_BIDIRECTIONAL);
+
pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
u32 cons, prod;
received = 0;
- prod = br32(B44_DMARX_STAT) & DMARX_STAT_CDMASK;
+ prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
prod /= sizeof(struct dma_desc);
cons = bp->rx_cons;
}
bp->rx_cons = cons;
- bw32(B44_DMARX_PTR, cons * sizeof(struct dma_desc));
+ bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
return received;
}
{
struct net_device *dev = dev_id;
struct b44 *bp = netdev_priv(dev);
- unsigned long flags;
u32 istat, imask;
int handled = 0;
- spin_lock_irqsave(&bp->lock, flags);
+ spin_lock(&bp->lock);
- istat = br32(B44_ISTAT);
- imask = br32(B44_IMASK);
+ istat = br32(bp, B44_ISTAT);
+ imask = br32(bp, B44_IMASK);
/* ??? What the fuck is the purpose of the interrupt mask
* ??? register if we have to mask it out by hand anyways?
istat &= imask;
if (istat) {
handled = 1;
+
+ if (unlikely(!netif_running(dev))) {
+ printk(KERN_INFO "%s: late interrupt.\n", dev->name);
+ goto irq_ack;
+ }
+
if (netif_rx_schedule_prep(dev)) {
/* NOTE: These writes are posted by the readback of
* the ISTAT register below.
dev->name);
}
- bw32(B44_ISTAT, istat);
- br32(B44_ISTAT);
+irq_ack:
+ bw32(bp, B44_ISTAT, istat);
+ br32(bp, B44_ISTAT);
}
- spin_unlock_irqrestore(&bp->lock, flags);
+ spin_unlock(&bp->lock);
return IRQ_RETVAL(handled);
}
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
+ struct sk_buff *bounce_skb;
+ int rc = NETDEV_TX_OK;
dma_addr_t mapping;
u32 len, entry, ctrl;
/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
netif_stop_queue(dev);
- spin_unlock_irq(&bp->lock);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
- return 1;
+ goto err_out;
}
- entry = bp->tx_prod;
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (mapping + len > B44_DMA_MASK) {
+ /* Chip can't handle DMA to/from >1GB, use bounce buffer */
+ pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+
+ bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
+ GFP_ATOMIC|GFP_DMA);
+ if (!bounce_skb)
+ goto err_out;
+
+ mapping = pci_map_single(bp->pdev, bounce_skb->data,
+ len, PCI_DMA_TODEVICE);
+ if (mapping + len > B44_DMA_MASK) {
+ pci_unmap_single(bp->pdev, mapping,
+ len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ goto err_out;
+ }
+
+ memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ skb = bounce_skb;
+ }
+ entry = bp->tx_prod;
bp->tx_buffers[entry].skb = skb;
pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
+ entry * sizeof(bp->tx_ring[0]),
+ DMA_TO_DEVICE);
+
entry = NEXT_TX(entry);
bp->tx_prod = entry;
wmb();
- bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+ bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
if (bp->flags & B44_FLAG_BUGGY_TXPTR)
- bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+ bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
if (bp->flags & B44_FLAG_REORDER_BUG)
- br32(B44_DMATX_PTR);
+ br32(bp, B44_DMATX_PTR);
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
+ dev->trans_start = jiffies;
+
+out_unlock:
spin_unlock_irq(&bp->lock);
- dev->trans_start = jiffies;
+ return rc;
- return 0;
+err_out:
+ rc = NETDEV_TX_BUSY;
+ goto out_unlock;
}
static int b44_change_mtu(struct net_device *dev, int new_mtu)
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
- * end up in the driver. bp->lock is not held and we are not
- * in an interrupt context and thus may sleep.
+ * end up in the driver.
*/
static void b44_init_rings(struct b44 *bp)
{
memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
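+
+ /* When the rings are streaming-mapped (ring hack), flush the freshly
+ cleared descriptor tables to the device before the chip uses them. */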
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
for (i = 0; i < bp->rx_pending; i++) {
if (b44_alloc_rx_skb(bp, -1, i) < 0)
break;
*/
static void b44_free_consistent(struct b44 *bp)
{
- if (bp->rx_buffers) {
- kfree(bp->rx_buffers);
- bp->rx_buffers = NULL;
- }
- if (bp->tx_buffers) {
- kfree(bp->tx_buffers);
- bp->tx_buffers = NULL;
- }
+ kfree(bp->rx_buffers);
+ bp->rx_buffers = NULL;
+ kfree(bp->tx_buffers);
+ bp->tx_buffers = NULL;
if (bp->rx_ring) {
- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
- bp->rx_ring, bp->rx_ring_dma);
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+ dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
}
if (bp->tx_ring) {
- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
- bp->tx_ring, bp->tx_ring_dma);
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+ dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
}
}
int size;
size = B44_RX_RING_SIZE * sizeof(struct ring_info);
- bp->rx_buffers = kmalloc(size, GFP_KERNEL);
+ bp->rx_buffers = kzalloc(size, GFP_KERNEL);
if (!bp->rx_buffers)
goto out_err;
- memset(bp->rx_buffers, 0, size);
size = B44_TX_RING_SIZE * sizeof(struct ring_info);
- bp->tx_buffers = kmalloc(size, GFP_KERNEL);
+ bp->tx_buffers = kzalloc(size, GFP_KERNEL);
if (!bp->tx_buffers)
goto out_err;
- memset(bp->tx_buffers, 0, size);
size = DMA_TABLE_BYTES;
bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
- if (!bp->rx_ring)
- goto out_err;
+ if (!bp->rx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+
+ rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!rx_ring)
+ goto out_err;
+
+ rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+ if (rx_ring_dma + size > B44_DMA_MASK) {
+ kfree(rx_ring);
+ goto out_err;
+ }
+
+ bp->rx_ring = rx_ring;
+ bp->rx_ring_dma = rx_ring_dma;
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
- if (!bp->tx_ring)
- goto out_err;
+ if (!bp->tx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *tx_ring;
+ dma_addr_t tx_ring_dma;
+
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!tx_ring)
+ goto out_err;
+
+ tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
+ if (tx_ring_dma + size > B44_DMA_MASK) {
+ kfree(tx_ring);
+ goto out_err;
+ }
+
+ bp->tx_ring = tx_ring;
+ bp->tx_ring_dma = tx_ring_dma;
+ bp->flags |= B44_FLAG_TX_RING_HACK;
+ }
return 0;
{
unsigned long reg;
- bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
- br32(reg);
+ br32(bp, reg);
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
- br32(reg);
+ br32(bp, reg);
}
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
if (ssb_is_core_up(bp)) {
- bw32(B44_RCV_LAZY, 0);
- bw32(B44_ENET_CTRL, ENET_CTRL_DISABLE);
+ bw32(bp, B44_RCV_LAZY, 0);
+ bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
- bw32(B44_DMATX_CTRL, 0);
+ bw32(bp, B44_DMATX_CTRL, 0);
bp->tx_prod = bp->tx_cons = 0;
- if (br32(B44_DMARX_STAT) & DMARX_STAT_EMASK) {
+ if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
100, 0);
}
- bw32(B44_DMARX_CTRL, 0);
+ bw32(bp, B44_DMARX_CTRL, 0);
bp->rx_prod = bp->rx_cons = 0;
} else {
ssb_pci_setup(bp, (bp->core_unit == 0 ?
b44_clear_stats(bp);
/* Make PHY accessible. */
- bw32(B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+ bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
(0x0d & MDIO_CTRL_MAXF_MASK)));
- br32(B44_MDIO_CTRL);
+ br32(bp, B44_MDIO_CTRL);
- if (!(br32(B44_DEVCTRL) & DEVCTRL_IPP)) {
- bw32(B44_ENET_CTRL, ENET_CTRL_EPSEL);
- br32(B44_ENET_CTRL);
+ if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
+ bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
+ br32(bp, B44_ENET_CTRL);
bp->flags &= ~B44_FLAG_INTERNAL_PHY;
} else {
- u32 val = br32(B44_DEVCTRL);
+ u32 val = br32(bp, B44_DEVCTRL);
if (val & DEVCTRL_EPR) {
- bw32(B44_DEVCTRL, (val & ~DEVCTRL_EPR));
- br32(B44_DEVCTRL);
+ bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
+ br32(bp, B44_DEVCTRL);
udelay(100);
}
bp->flags |= B44_FLAG_INTERNAL_PHY;
/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
- bw32(B44_CAM_CTRL, 0);
+ bw32(bp, B44_CAM_CTRL, 0);
if (!(bp->dev->flags & IFF_PROMISC)) {
u32 val;
__b44_cam_write(bp, bp->dev->dev_addr, 0);
- val = br32(B44_CAM_CTRL);
- bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+ val = br32(bp, B44_CAM_CTRL);
+ bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
}
b44_setup_phy(bp);
/* Enable CRC32, set proper LED modes and power on PHY */
- bw32(B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
- bw32(B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
+ bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
+ bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
/* This sets the MAC address too. */
__b44_set_rx_mode(bp->dev);
/* MTU + eth header + possible VLAN tag + struct rx_header */
- bw32(B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
- bw32(B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+ bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+ bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
- bw32(B44_TX_WMARK, 56); /* XXX magic */
- bw32(B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
- bw32(B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
- bw32(B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+ bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
+ bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+ bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+ bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
(bp->rx_offset << DMARX_CTRL_ROSHIFT)));
- bw32(B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+ bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
- bw32(B44_DMARX_PTR, bp->rx_pending);
+ bw32(bp, B44_DMARX_PTR, bp->rx_pending);
bp->rx_prod = bp->rx_pending;
- bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
- val = br32(B44_ENET_CTRL);
- bw32(B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+ val = br32(bp, B44_ENET_CTRL);
+ bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
static int b44_open(struct net_device *dev)
err = b44_alloc_consistent(bp);
if (err)
- return err;
-
- err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
- if (err)
- goto err_out_free;
-
- spin_lock_irq(&bp->lock);
+ goto out;
b44_init_rings(bp);
b44_init_hw(bp);
- bp->flags |= B44_FLAG_INIT_COMPLETE;
- spin_unlock_irq(&bp->lock);
+ b44_check_phy(bp);
+
+ err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
+ if (unlikely(err < 0)) {
+ b44_chip_reset(bp);
+ b44_free_rings(bp);
+ b44_free_consistent(bp);
+ goto out;
+ }
init_timer(&bp->timer);
bp->timer.expires = jiffies + HZ;
add_timer(&bp->timer);
b44_enable_ints(bp);
-
- return 0;
-
-err_out_free:
- b44_free_consistent(bp);
+ netif_start_queue(dev);
+out:
return err;
}
}
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void b44_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ b44_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
static int b44_close(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
netif_stop_queue(dev);
+ netif_poll_disable(dev);
+
del_timer_sync(&bp->timer);
spin_lock_irq(&bp->lock);
#endif
b44_halt(bp);
b44_free_rings(bp);
- bp->flags &= ~B44_FLAG_INIT_COMPLETE;
- netif_carrier_off(bp->dev);
+ netif_carrier_off(dev);
spin_unlock_irq(&bp->lock);
free_irq(dev->irq, dev);
+ netif_poll_enable(dev);
+
b44_free_consistent(bp);
return 0;
hwstat->rx_symbol_errs);
nstat->tx_aborted_errors = hwstat->tx_underruns;
+#if 0
+ /* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+#endif
return nstat;
}
{
struct b44 *bp = netdev_priv(dev);
u32 val;
- int i=0;
- unsigned char zero[6] = {0,0,0,0,0,0};
- val = br32(B44_RXCONFIG);
+ val = br32(bp, B44_RXCONFIG);
val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
if (dev->flags & IFF_PROMISC) {
val |= RXCONFIG_PROMISC;
- bw32(B44_RXCONFIG, val);
+ bw32(bp, B44_RXCONFIG, val);
} else {
+ unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
+ int i = 0;
+
__b44_set_mac_addr(bp);
if (dev->flags & IFF_ALLMULTI)
val |= RXCONFIG_ALLMULTI;
else
- i=__b44_load_mcast(bp, dev);
+ i = __b44_load_mcast(bp, dev);
- for(;i<64;i++) {
+ for (; i < 64; i++) {
__b44_cam_write(bp, zero, i);
}
- bw32(B44_RXCONFIG, val);
- val = br32(B44_CAM_CTRL);
- bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+ bw32(bp, B44_RXCONFIG, val);
+ val = br32(bp, B44_CAM_CTRL);
+ bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
}
{
struct b44 *bp = netdev_priv(dev);
- if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
+ if (!netif_running(dev))
return -EAGAIN;
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
cmd->advertising = 0;
if (bp->flags & B44_FLAG_ADV_10HALF)
- cmd->advertising |= ADVERTISE_10HALF;
+ cmd->advertising |= ADVERTISED_10baseT_Half;
if (bp->flags & B44_FLAG_ADV_10FULL)
- cmd->advertising |= ADVERTISE_10FULL;
+ cmd->advertising |= ADVERTISED_10baseT_Full;
if (bp->flags & B44_FLAG_ADV_100HALF)
- cmd->advertising |= ADVERTISE_100HALF;
+ cmd->advertising |= ADVERTISED_100baseT_Half;
if (bp->flags & B44_FLAG_ADV_100FULL)
- cmd->advertising |= ADVERTISE_100FULL;
- cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
SPEED_100 : SPEED_10;
cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
{
struct b44 *bp = netdev_priv(dev);
- if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
+ if (!netif_running(dev))
return -EAGAIN;
/* We do not support gigabit. */
return 0;
}
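+/* "ethtool -S" support: report the names and values of the MIB counters. */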
+static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
+ break;
+ }
+}
+
+static int b44_get_stats_count(struct net_device *dev)
+{
+ return ARRAY_SIZE(b44_gstrings);
+}
+
+static void b44_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct b44 *bp = netdev_priv(dev);
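+ /* The counters in bp->hw_stats are consecutive u32s in the same order
+ as b44_gstrings, so a flat pointer walk copies them all. */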
+ u32 *val = &bp->hw_stats.tx_good_octets;
+ u32 i;
+
+ spin_lock_irq(&bp->lock);
+
+ b44_stats_update(bp);
+
+ for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+ *data++ = *val++;
+
+ spin_unlock_irq(&bp->lock);
+}
+
static struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
.get_settings = b44_get_settings,
.set_pauseparam = b44_set_pauseparam,
.get_msglevel = b44_get_msglevel,
.set_msglevel = b44_set_msglevel,
+ .get_strings = b44_get_strings,
+ .get_stats_count = b44_get_stats_count,
+ .get_ethtool_stats = b44_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
struct b44 *bp = netdev_priv(dev);
- int err;
+ int err = -EINVAL;
+
+ if (!netif_running(dev))
+ goto out;
spin_lock_irq(&bp->lock);
err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
spin_unlock_irq(&bp->lock);
-
+out:
return err;
}
bp->dev->dev_addr[3] = eeprom[80];
bp->dev->dev_addr[4] = eeprom[83];
bp->dev->dev_addr[5] = eeprom[82];
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
bp->phy_addr = eeprom[90] & 0x1f;
- bp->mdc_port = (eeprom[90] >> 14) & 0x1;
/* With this, plus the rx_header prepended to the data by the
* hardware, we'll land the ethernet header on a 2-byte boundary.
bp->imask = IMASK_DEF;
bp->core_unit = ssb_core_unit(bp);
- bp->dma_offset = ssb_get_addr(bp, SBID_PCI_DMA, 0);
+ bp->dma_offset = SB_PCI_DMA;
/* XXX - really required?
bp->flags |= B44_FLAG_BUGGY_TXPTR;
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
+ err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_free_res;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
if (err) {
printk(KERN_ERR PFX "No usable DMA configuration, "
"aborting.\n");
bp = netdev_priv(dev);
bp->pdev = pdev;
bp->dev = dev;
- if (b44_debug >= 0)
- bp->msg_enable = (1 << b44_debug) - 1;
- else
- bp->msg_enable = B44_DEF_MSG_ENABLE;
+
+ bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
spin_lock_init(&bp->lock);
- bp->regs = (unsigned long) ioremap(b44reg_base, b44reg_len);
+ bp->regs = ioremap(b44reg_base, b44reg_len);
if (bp->regs == 0UL) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
dev->poll = b44_poll;
dev->weight = 64;
dev->watchdog_timeo = B44_TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = b44_poll_controller;
+#endif
dev->change_mtu = b44_change_mtu;
dev->irq = pdev->irq;
SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+ netif_carrier_off(dev);
+
err = b44_get_invariants(bp);
if (err) {
printk(KERN_ERR PFX "Problem fetching invariants of chip, "
pci_set_drvdata(pdev, dev);
- pci_save_state(bp->pdev, bp->pci_cfg_state);
+ pci_save_state(bp->pdev);
printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
for (i = 0; i < 6; i++)
return 0;
err_out_iounmap:
- iounmap((void *) bp->regs);
+ iounmap(bp->regs);
err_out_free_dev:
free_netdev(dev);
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct b44 *bp = netdev_priv(dev);
- if (dev) {
- struct b44 *bp = netdev_priv(dev);
-
- unregister_netdev(dev);
- iounmap((void *) bp->regs);
- free_netdev(dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- }
+ unregister_netdev(dev);
+ iounmap(bp->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
}
-static int b44_suspend(struct pci_dev *pdev, u32 state)
+static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct b44 *bp = dev->priv;
+ struct b44 *bp = netdev_priv(dev);
if (!netif_running(dev))
return 0;
b44_free_rings(bp);
spin_unlock_irq(&bp->lock);
+
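+ /* The IRQ and the PCI device are reacquired in b44_resume(). */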
+ free_irq(dev->irq, dev);
+ pci_disable_device(pdev);
return 0;
}
static int b44_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct b44 *bp = dev->priv;
+ struct b44 *bp = netdev_priv(dev);
- pci_restore_state(pdev, bp->pci_cfg_state);
+ pci_restore_state(pdev);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
if (!netif_running(dev))
return 0;
+ if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
+ printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
+
spin_lock_irq(&bp->lock);
b44_init_rings(bp);
add_timer(&bp->timer);
b44_enable_ints(bp);
+ netif_wake_queue(dev);
return 0;
}
static int __init b44_init(void)
{
+ unsigned int dma_desc_align_size = dma_get_cache_alignment();
+
+ /* Set up parameters for syncing RX/TX DMA descriptors */
+ dma_desc_align_mask = ~(dma_desc_align_size - 1);
+ dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
+
return pci_module_init(&b44_driver);
}