1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
/* Driver identity strings used in log messages and ethtool output. */
#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.01"
#define DRV_MODULE_RELDATE	"Jun 16, 2006"

#define B44_DEF_MSG_ENABLE	\
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

/* RX/TX descriptor ring geometry.  Ring sizes are powers of two so
 * that index wrapping can use masking (see NEXT_TX below).
 */
#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
/* Chip can only DMA within the low 1GB of the address space. */
#define B44_DMA_MASK 0x3fffffff

/* Number of unusable TX slots and number of free TX slots, derived
 * from the producer/consumer indices.
 */
#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

/* RX buffers leave room for the chip-written rx_header before the frame. */
#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
98 MODULE_VERSION(DRV_MODULE_VERSION);
/* PCI IDs this driver binds to: BCM4401 and its B0/B1 revisions. */
static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
/* Forward declarations for routines used before their definitions. */
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *, int);

/* Alignment mask and length used by the b44_sync_dma_desc_* helpers
 * when flushing individual ring descriptors.
 */
static int dma_desc_align_mask;
static int dma_desc_sync_size;

/* ethtool statistics key strings; _B44 stringifies each stat name. */
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
/* Flush one descriptor's aligned span to the device.  Used when the
 * rings were set up with streaming DMA (the RING_HACK fallback paths).
 */
static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						unsigned long offset,
						enum dma_data_direction dir)
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);

/* Counterpart of the above: give descriptor ownership back to the CPU. */
static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     unsigned long offset,
					     enum dma_data_direction dir)
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
/* 32-bit MMIO read from the chip's register window. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
	return readl(bp->regs + reg);

/* 32-bit MMIO write to the chip's register window. */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
	writel(val, bp->regs + reg);

/* Poll @reg until @bit clears (clear != 0) or sets (clear == 0),
 * giving up after @timeout polls and logging an error on timeout.
 */
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
		if (!clear && (val & bit))
	printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
	       (clear ? "clear" : "set"));
/* Sonics SiliconBackplane support routines. ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */

/* Core revision, extracted from the SB identification-high register. */
static u32 ssb_get_core_rev(struct b44 *bp)
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
/* Point the BAR0 window at the backplane's PCI core, enable interrupt
 * routing and PCI prefetch/burst, then restore the original window.
 */
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
	u32 bar_orig, pci_rev, val;

	/* Save the caller's BAR0 window before retargeting it. */
	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	bw32(bp, B44_SBINTVEC, val);

	/* Enable prefetch and burst for PCI transactions. */
	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	/* Put the BAR0 window back the way we found it. */
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Take the ethernet core off the backplane: reject new transactions,
 * wait for it to go idle, then assert reset.  Reads of B44_SBTMSLOW
 * after each write flush the posted MMIO writes.
 */
static void ssb_core_disable(struct b44 *bp)
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);

/* Bring the core out of reset with a forced-gated clock, clear any
 * latched error state, then release to a normally clocked state.
 */
static void ssb_core_reset(struct b44 *bp)
	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);

	/* Clear SERR if set, this is a hw bug workaround. */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
/* Derive this core's unit number from its backplane address-match
 * register (type field selects which base-address mask applies).
 */
static int ssb_core_unit(struct b44 *bp)
	u32 val = br32(bp, B44_SBADMATCH0);

	type = val & SBADMATCH0_TYPE_MASK;
		base = val & SBADMATCH0_BS0_MASK;
		base = val & SBADMATCH0_BS1_MASK;
		base = val & SBADMATCH0_BS2_MASK;

/* True when the core is clocked and neither in reset nor rejecting. */
static int ssb_is_core_up(struct b44 *bp)
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Program one 6-byte MAC address into CAM slot @index.  The address is
 * split over the LO (last four bytes) and HI (first two bytes, plus a
 * valid bit) data registers, then committed with a CAM write command.
 * Caller holds bp->lock.
 */
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

/* Mask all chip interrupts.  Lock-free variant for use under bp->lock. */
static inline void __b44_disable_ints(struct b44 *bp)
	bw32(bp, B44_IMASK, 0);

/* Mask all interrupts and flush the posted write. */
static void b44_disable_ints(struct b44 *bp)
	__b44_disable_ints(bp);

	/* Flush posted writes. */

/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
	bw32(bp, B44_IMASK, bp->imask);
/* Read PHY register @reg over MDIO into *@val.  Acks any pending MII
 * interrupt first, issues the read command, then waits for completion.
 */
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

/* Write @val to PHY register @reg over MDIO; returns wait status. */
static int b44_writephy(struct b44 *bp, int reg, u32 val)
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);

/* Software-reset the PHY via BMCR and verify the reset bit clears. */
static int b44_phy_reset(struct b44 *bp)
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);

	err = b44_readphy(bp, MII_BMCR, &val);
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Apply the given pause flags to the MAC: record them in bp->flags,
 * then program RX flow control and TX pause (with RX-FIFO high-water
 * mark) accordingly.  Caller holds bp->lock.
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);

/* Resolve autonegotiated pause from local/remote advertisements. */
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
	/* The driver supports only rx pause by default because
	 * the b44 mac tx pause mechanism generates excessive
	 * Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)){
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;

	__b44_set_flow_ctrl(bp, pause_enab);
/* Configure the PHY LEDs, then either start autonegotiation with the
 * advertisement bits derived from bp->flags, or force speed/duplex
 * when B44_FLAG_FORCE_LINK is set.  Returns 0 or a PHY access error.
 */
static int b44_setup_phy(struct b44 *bp)
	/* LED setup: mask activity LEDs, enable traffic-meter LED. */
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		/* Autonegotiate: build the advertisement word from flags. */
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)

		/* Forced link: program BMCR speed/duplex directly. */
		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
/* Accumulate the chip's MIB counters into bp->hw_stats.  Relies on the
 * hw_stats fields being laid out in the same order as the TX and RX
 * MIB register ranges, walked 4 bytes at a time.
 */
static void b44_stats_update(struct b44 *bp)
	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);

/* Log current link state: up/down, speed, duplex and pause settings. */
static void b44_link_report(struct b44 *bp)
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
/* Poll the PHY for link, speed and duplex changes, update bp->flags and
 * the MAC duplex setting, and report carrier transitions.  Also warns
 * on remote fault or jabber conditions reported in BMSR.
 */
static void b44_check_phy(struct b44 *bp)
	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
		/* Mirror PHY-reported speed/duplex into bp->flags. */
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			/* Link came up: program MAC duplex to match PHY. */
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			/* Autoneg case: resolve pause from advertisements. */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			netif_carrier_on(bp->dev);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			netif_carrier_off(bp->dev);

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* Once-a-second timer: re-check PHY state and refresh MIB counters,
 * then re-arm itself for another HZ jiffies.
 */
static void b44_timer(unsigned long __opaque)
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

/* Reclaim completed TX descriptors: walk from tx_cons up to the chip's
 * current descriptor pointer, unmapping and freeing each skb, then wake
 * the queue if enough slots became free.
 */
static void b44_tx(struct b44 *bp)
	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
		dev_kfree_skb_irq(skb);

	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
633 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
634 * before the DMA address you give it. So we allocate 30 more bytes
635 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
636 * point the chip at 30 bytes past where the rx_header will go.
638 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
641 struct ring_info *src_map, *map;
642 struct rx_header *rh;
650 src_map = &bp->rx_buffers[src_idx];
651 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
652 map = &bp->rx_buffers[dest_idx];
653 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
657 mapping = pci_map_single(bp->pdev, skb->data,
661 /* Hardware bug work-around, the chip is unable to do PCI DMA
662 to/from anything above 1GB :-( */
663 if (dma_mapping_error(mapping) ||
664 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
666 if (!dma_mapping_error(mapping))
667 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
668 dev_kfree_skb_any(skb);
669 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
672 mapping = pci_map_single(bp->pdev, skb->data,
675 if (dma_mapping_error(mapping) ||
676 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
677 if (!dma_mapping_error(mapping))
678 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
679 dev_kfree_skb_any(skb);
685 skb_reserve(skb, bp->rx_offset);
687 rh = (struct rx_header *)
688 (skb->data - bp->rx_offset);
693 pci_unmap_addr_set(map, mapping, mapping);
698 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
699 if (dest_idx == (B44_RX_RING_SIZE - 1))
700 ctrl |= DESC_CTRL_EOT;
702 dp = &bp->rx_ring[dest_idx];
703 dp->ctrl = cpu_to_le32(ctrl);
704 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
706 if (bp->flags & B44_FLAG_RX_RING_HACK)
707 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
708 dest_idx * sizeof(dp),
711 return RX_PKT_BUF_SZ;
714 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
716 struct dma_desc *src_desc, *dest_desc;
717 struct ring_info *src_map, *dest_map;
718 struct rx_header *rh;
722 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
723 dest_desc = &bp->rx_ring[dest_idx];
724 dest_map = &bp->rx_buffers[dest_idx];
725 src_desc = &bp->rx_ring[src_idx];
726 src_map = &bp->rx_buffers[src_idx];
728 dest_map->skb = src_map->skb;
729 rh = (struct rx_header *) src_map->skb->data;
732 pci_unmap_addr_set(dest_map, mapping,
733 pci_unmap_addr(src_map, mapping));
735 if (bp->flags & B44_FLAG_RX_RING_HACK)
736 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
737 src_idx * sizeof(src_desc),
740 ctrl = src_desc->ctrl;
741 if (dest_idx == (B44_RX_RING_SIZE - 1))
742 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
744 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
746 dest_desc->ctrl = ctrl;
747 dest_desc->addr = src_desc->addr;
751 if (bp->flags & B44_FLAG_RX_RING_HACK)
752 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
753 dest_idx * sizeof(dest_desc),
756 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
761 static int b44_rx(struct b44 *bp, int budget)
767 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
768 prod /= sizeof(struct dma_desc);
771 while (cons != prod && budget > 0) {
772 struct ring_info *rp = &bp->rx_buffers[cons];
773 struct sk_buff *skb = rp->skb;
774 dma_addr_t map = pci_unmap_addr(rp, mapping);
775 struct rx_header *rh;
778 pci_dma_sync_single_for_cpu(bp->pdev, map,
781 rh = (struct rx_header *) skb->data;
782 len = cpu_to_le16(rh->len);
783 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
784 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
786 b44_recycle_rx(bp, cons, bp->rx_prod);
788 bp->stats.rx_dropped++;
798 len = cpu_to_le16(rh->len);
799 } while (len == 0 && i++ < 5);
807 if (len > RX_COPY_THRESHOLD) {
809 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
812 pci_unmap_single(bp->pdev, map,
813 skb_size, PCI_DMA_FROMDEVICE);
814 /* Leave out rx_header */
815 skb_put(skb, len+bp->rx_offset);
816 skb_pull(skb,bp->rx_offset);
818 struct sk_buff *copy_skb;
820 b44_recycle_rx(bp, cons, bp->rx_prod);
821 copy_skb = dev_alloc_skb(len + 2);
822 if (copy_skb == NULL)
823 goto drop_it_no_recycle;
825 copy_skb->dev = bp->dev;
826 skb_reserve(copy_skb, 2);
827 skb_put(copy_skb, len);
828 /* DMA sync done above, copy just the actual packet */
829 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
833 skb->ip_summed = CHECKSUM_NONE;
834 skb->protocol = eth_type_trans(skb, bp->dev);
835 netif_receive_skb(skb);
836 bp->dev->last_rx = jiffies;
840 bp->rx_prod = (bp->rx_prod + 1) &
841 (B44_RX_RING_SIZE - 1);
842 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
846 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
/* NAPI poll: reclaim TX completions, receive within the quota, and
 * recover from error interrupts by reinitializing the chip.  Returns 0
 * when all work is done (poll complete), 1 to stay scheduled.
 */
static int b44_poll(struct net_device *netdev, int *budget)
	struct b44 *bp = netdev_priv(netdev);

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		/* spin_unlock(&bp->tx_lock); */
	spin_unlock_irq(&bp->lock);

	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)

	if (bp->istat & ISTAT_ERRORS) {
		/* Error recovery: restart the chip under the lock. */
		spin_lock_irq(&bp->lock);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);

		netif_rx_complete(netdev);

	return (done ? 0 : 1);
/* Interrupt handler: record pending sources in bp->istat, disable chip
 * interrupts and schedule the NAPI poll to do the real work.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register appears to gate delivery only;
	 * ISTAT still reports all sources, so the status must be masked
	 * by hand with IMASK before acting on it.
	 */

	if (unlikely(!netif_running(dev))) {
		printk(KERN_INFO "%s: late interrupt.\n", dev->name);

	if (netif_rx_schedule_prep(dev)) {
		/* NOTE: These writes are posted by the readback of
		 * the ISTAT register below.
		 */
		__b44_disable_ints(bp);
		__netif_rx_schedule(dev);
		printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",

	/* Ack the sources we saw; the read flushes posted writes. */
	bw32(bp, B44_ISTAT, istat);

	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
/* TX watchdog (fires after B44_TX_TIMEOUT of stalled TX): log it and
 * reset the chip, then wake the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",

	spin_lock_irq(&bp->lock);

	spin_unlock_irq(&bp->lock);

	netif_wake_queue(dev);
/* Queue one skb for transmission.  Handles the chip's 1GB DMA limit by
 * copying into a GFP_DMA bounce buffer when the mapping lands too high,
 * fills in one descriptor and kicks the TX DMA pointer.
 */
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	int rc = NETDEV_TX_OK;
	u32 len, entry, ctrl;

	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping,
						 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);

		/* Transmit the bounce copy and drop the original skb. */
		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
		dev_kfree_skb_any(skb);

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Kick the DMA engine; repeat/readback for hardware errata. */
	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&bp->lock);

	rc = NETDEV_TX_BUSY;
/* Change the device MTU.  Validates the range; if the interface is up,
 * the chip must be reinitialized under the lock for the new size.
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 */

	spin_lock_irq(&bp->lock);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
	struct ring_info *rp;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
		dev_kfree_skb_any(rp->skb);

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	/* Streaming-DMA rings must be flushed after the memset. */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   PCI_DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
/* Release ring and buffer bookkeeping memory.
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void b44_free_consistent(struct b44 *bp)
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;

	/* Rings allocated via the fallback path were dma_map'ed kmalloc
	 * memory; the normal path used pci_alloc_consistent.
	 */
	if (bp->flags & B44_FLAG_RX_RING_HACK) {
		dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->rx_ring, bp->rx_ring_dma);
		bp->flags &= ~B44_FLAG_RX_RING_HACK;

	if (bp->flags & B44_FLAG_TX_RING_HACK) {
		dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->tx_ring, bp->tx_ring_dma);
		bp->flags &= ~B44_FLAG_TX_RING_HACK;

/* Allocate ring bookkeeping arrays and the descriptor rings themselves.
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);

		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, GFP_KERNEL);

		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,

		/* Reject mappings outside the chip's 1GB DMA window. */
		if (dma_mapping_error(rx_ring_dma) ||
			rx_ring_dma + size > B44_DMA_MASK) {

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);

		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, GFP_KERNEL);

		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,

		if (dma_mapping_error(tx_ring_dma) ||
			tx_ring_dma + size > B44_DMA_MASK) {

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;

	b44_free_consistent(bp);
/* bp->lock is held. */
/* Zero the MIB counters by enabling clear-on-read and reading each one. */
static void b44_clear_stats(struct b44 *bp)
	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)

/* bp->lock is held. */
/* Full chip reset: quiesce DMA if the core is up, reset the backplane
 * core, clear statistics, make the PHY accessible and select the
 * internal or external PHY based on strapping.
 */
static void b44_chip_reset(struct b44 *bp)
	if (ssb_is_core_up(bp)) {
		/* Orderly shutdown of a running core before reset. */
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
		ssb_pci_setup(bp, (bp->core_unit == 0 ?

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		/* External PHY strapped: select the external interface. */
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
		u32 val = br32(bp, B44_DEVCTRL);

		/* Internal PHY: release it from reset if held. */
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
		bp->flags |= B44_FLAG_INTERNAL_PHY;
/* bp->lock is held. */
/* Stop the chip: mask interrupts, then reset. */
static void b44_halt(struct b44 *bp)
	b44_disable_ints(bp);

/* bp->lock is held. */
/* Program the device MAC address into CAM slot 0 and enable the CAM,
 * unless the interface is in promiscuous mode.
 */
static void __b44_set_mac_addr(struct b44 *bp)
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);

/* net_device set_mac_address hook: validate, store, and program the
 * new address under bp->lock.
 */
static int b44_set_mac_addr(struct net_device *dev, void *p)
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))

	if (!is_valid_ether_addr(addr->sa_data))

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int full_reset)
	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */

	/* Point the DMA engines at the rings and enable them. */
	bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	bw32(bp, B44_DMARX_PTR, bp->rx_pending);
	bp->rx_prod = bp->rx_pending;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
/* net_device open hook: allocate rings, grab the (shared) IRQ, start
 * the one-second PHY/stats timer and enable interrupts.
 */
static int b44_open(struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);

	err = b44_alloc_consistent(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		/* IRQ unavailable: unwind the allocations. */
		b44_free_consistent(bp);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
/* Debug helper: dump PCI and chip state to the console. */
/*static*/ void b44_dump_state(struct b44 *bp)
	u32 val32, val32_2, val32_3, val32_4, val32_5;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);

/* Write @bytes of pattern/mask data into the chip's filter table at
 * @table_offset, one 32-bit word at a time.
 */
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
/* Build one pseudo-magic-packet pattern/mask pair at @offset into
 * @ppattern/@pmask: a sync stream of 0xff bytes followed by the MAC
 * address repeated up to B44_MAX_PATTERNS times, truncated so the whole
 * thing fits in B44_PATTERN_SIZE.
 * NOTE(review): the magicsync declaration, the else-branch keyword and
 * the return of the final length are elided in this view — confirm
 * against the full file.
 */
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	/* Sync bytes: all-ones pattern with every bit significant in mask. */
	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		/* Copy a full MAC, or however many bytes still fit. */
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <=0)
		for (k = 0; k< ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len, (unsigned long *) pmask);
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter: pattern 0 matches a magic packet inside an
 * IPv4/UDP frame, pattern 1 a raw Ethernet II frame, pattern 2 an
 * IPv6/UDP frame.  Lengths of all three are programmed in one register.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
	int plen0, plen1, plen2;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");

	/* Ipv4 magic packet pattern - pattern 0.*/
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
/* Arm the chip for Wake-on-LAN before suspend: B0-and-later parts have a
 * hardware magic-packet matcher (program the MAC address and set MPM);
 * older parts fall back to the pseudo-magic pattern filter.  Finally the
 * core and the PCI function are put into their power-save/PME states.
 */
static void b44_setup_wol(struct b44 *bp)
	/* Accept all multicast so a wake frame is never filtered out. */
	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		/* Station address, low 4 bytes then high 2 bytes. */
		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		/* Magic-packet match + pattern-filter enable. */
		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

		b44_setup_pseudo_magicp(bp);

	/* Put the core in power-save and enable PME on the PCI function. */
	val = br32(bp, B44_SBTMSLOW);
	bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

	pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
	pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/* net_device ->stop() hook: quiesce the queue and poll handler, kill the
 * periodic timer, shut the hardware down under the lock, release the IRQ
 * and free the DMA rings.  If WOL is enabled the chip is re-armed for
 * wakeup instead of being left fully reset (tail elided in this view).
 */
static int b44_close(struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {

	b44_free_consistent(bp);
/* net_device ->get_stats() hook: translate the accumulated hardware MIB
 * counters into the generic net_device_stats layout. */
static struct net_device_stats *b44_get_stats(struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes = hwstat->rx_octets;
	nstat->tx_bytes = hwstat->tx_octets;
	nstat->tx_errors = (hwstat->tx_jabber_pkts +
			    hwstat->tx_oversize_pkts +
			    hwstat->tx_underruns +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_late_cols);
	/* NOTE(review): multicast count is taken from the TX multicast
	 * counter; rx_multicast_pkts may be what was intended — verify. */
	nstat->multicast = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors = hwstat->rx_align_errs;
	nstat->rx_crc_errors = hwstat->rx_crc_errs;
	nstat->rx_errors = (hwstat->rx_jabber_pkts +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_missed_pkts +
			    hwstat->rx_crc_align_errs +
			    hwstat->rx_undersize +
			    hwstat->rx_crc_errs +
			    hwstat->rx_align_errs +
			    hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;

	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/* Load the device's multicast list into the chip's CAM, starting at CAM
 * entry 1 (entry 0 holds the station address), capped at the CAM size.
 * Caller holds bp->lock.  Returns (in the elided tail) the index after
 * the last entry written. */
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
	struct dev_mc_list *mclist;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
/* Program the RX filtering mode.  Promiscuous mode bypasses the CAM;
 * otherwise the station address and multicast list are written to the
 * CAM, the remaining entries are zeroed, and the CAM is enabled.
 * Caller holds bp->lock. */
static void __b44_set_rx_mode(struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};

		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
			/* Fill the multicast CAM, then blank the rest. */
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++) {
			__b44_cam_write(bp, zero, i);
		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1724 static void b44_set_rx_mode(struct net_device *dev)
1726 struct b44 *bp = netdev_priv(dev);
1728 spin_lock_irq(&bp->lock);
1729 __b44_set_rx_mode(dev);
1730 spin_unlock_irq(&bp->lock);
1733 static u32 b44_get_msglevel(struct net_device *dev)
1735 struct b44 *bp = netdev_priv(dev);
1736 return bp->msg_enable;
1739 static void b44_set_msglevel(struct net_device *dev, u32 value)
1741 struct b44 *bp = netdev_priv(dev);
1742 bp->msg_enable = value;
/* ethtool ->get_drvinfo hook: report driver name, version and PCI bus id.
 * NOTE(review): plain strcpy into the fixed-size ethtool_drvinfo fields;
 * the sources here are bounded compile-time strings, but strlcpy would be
 * the safer idiom — consider when touching this. */
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy (info->driver, DRV_MODULE_NAME);
	strcpy (info->version, DRV_MODULE_VERSION);
	strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool ->nway_reset hook: restart autonegotiation if it is enabled. */
static int b44_nway_reset(struct net_device *dev)
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	/* Read BMCR twice — presumably to flush latched status bits before
	 * acting on the value; TODO confirm against the PHY datasheet. */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);

	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);

	spin_unlock_irq(&bp->lock);
/* ethtool ->get_settings hook: report supported/advertised link modes,
 * current speed/duplex/autoneg state and PHY addressing, all derived
 * from the driver's bp->flags bookkeeping. */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |

	/* Advertised modes mirror the B44_FLAG_ADV_* bits. */
	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;

	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)){
/* ethtool ->set_settings hook: validate the request (gigabit modes are
 * rejected; forced mode must be 10/100 half/full), then translate it
 * into the B44_FLAG_* bits and, if the interface is running, re-apply
 * it to the hardware. */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: clear forced-link state, then rebuild the
		 * advertised set (empty request means advertise all). */
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		/* Forced mode: record the exact speed/duplex requested. */
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;

	if (netif_running(dev))

	spin_unlock_irq(&bp->lock);
/* ethtool ->get_ringparam hook: report RX ring limits and current
 * pending count (TX side is not reportable via this old API). */
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
/* ethtool ->set_ringparam hook: validate the requested ring sizes
 * (no mini/jumbo rings on this hardware), store them, restart the
 * interface under the lock (restart lines elided in this view), then
 * re-enable interrupts. */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
/* ethtool ->get_pauseparam hook: report flow-control state from the
 * B44_FLAG_PAUSE_AUTO / RX_PAUSE / TX_PAUSE flag bits. */
static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
	struct b44 *bp = netdev_priv(dev);

		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
/* ethtool ->set_pauseparam hook: update the pause flag bits under the
 * lock; with pause-autoneg the link is renegotiated (elided lines),
 * otherwise the flow-control registers are reprogrammed directly. */
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		/* Manual pause settings: apply immediately. */
		__b44_set_flow_ctrl(bp, bp->flags);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
/* ethtool ->get_strings hook: copy the statistics name table out. */
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1968 static int b44_get_stats_count(struct net_device *dev)
1970 return ARRAY_SIZE(b44_gstrings);
/* ethtool ->get_ethtool_stats hook: refresh the MIB counters from the
 * hardware, then copy them out as u64s in b44_gstrings order.  Relies on
 * the b44_hw_stats counters being laid out contiguously starting at
 * tx_good_octets. */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)

	spin_unlock_irq(&bp->lock);
/* ethtool ->get_wol hook: only magic-packet wake is supported. */
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	/* No SecureOn password support. */
	memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool ->set_wol hook: record the WOL choice in bp->flags; the
 * hardware is actually armed later, at suspend/close time. */
static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);
/* ethtool operations table wired up via SET_ETHTOOL_OPS() in
 * b44_init_one(). */
static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_stats_count	= b44_get_stats_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
/* net_device ->do_ioctl hook: forward MII ioctls to the generic MII
 * layer under the device lock; rejected when the interface is down. */
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
/* Read 128-bytes of EEPROM.  The EEPROM is mapped as 16-bit words at
 * register offset 4096.
 * NOTE(review): raw readw with no cpu_to_le16 conversion — byte order of
 * the resulting buffer on big-endian hosts should be verified. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = readw(bp->regs + 4096 + i);
/* Probe-time discovery: pull the MAC address and PHY address out of the
 * EEPROM (note the byte-swapped ordering of the EEPROM layout), validate
 * the address, and record chip invariants (core unit, DMA offset,
 * default interrupt mask, B0-or-later revision flag). */
static int __devinit b44_get_invariants(struct b44 *bp)
	err = b44_read_eeprom(bp, &eeprom[0]);

	/* EEPROM stores the MAC with bytes swapped within each word. */
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;

	/* Core rev >= 7 has the hardware magic-packet matcher. */
	if (ssb_get_core_rev(bp) >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;
/* PCI probe entry point: enable and map the device, set the 30-bit DMA
 * mask the chip requires, allocate and populate the net_device, read the
 * invariants, register the netdev and print the MAC address.
 * NOTE(review): many error-branch lines (returns, err_out_iounmap /
 * err_out_free_dev labels, success return) are elided in this view. */
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;

	/* Banner only once, on first probe. */
	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
		dev_err(&pdev->dev, "Cannot enable PCI device, "

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
			"Cannot find proper PCI device "
			"base address, aborting.\n");
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
			"Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;

	pci_set_master(pdev);

	/* The chip can only DMA to the low 1GB (30-bit mask). */
	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;

	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
		goto err_out_free_res;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev,&pdev->dev);

	/* No interesting netdevice features in this card... */

	bp = netdev_priv(dev);

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
		goto err_out_free_dev;

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	/* Wire up the pre-net_device_ops method pointers. */
	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = b44_get_invariants(bp);
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_iounmap;

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_iounmap;

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: unregister the netdev and release PCI
 * resources (iounmap/free_netdev lines elided in this view). */
static void __devexit b44_remove_one(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
/* PM suspend hook: stop the timer, halt the chip under the lock, detach
 * the netdev, release the IRQ, arm WOL if requested (elided lines), and
 * power the PCI function down. */
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {

	pci_disable_device(pdev);
/* PM resume hook: restore PCI state, re-acquire the IRQ, re-initialize
 * the hardware under the lock (init lines elided), re-attach the netdev,
 * restart the periodic timer and wake the queue. */
static int b44_resume(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);

	if (!netif_running(dev))

	/* Failure here is only logged; the device would be dead anyway. */
	if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);

	spin_lock_irq(&bp->lock);

	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_wake_queue(dev);
/* PCI driver glue: probe/remove plus power-management callbacks. */
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
/* Module init: compute DMA descriptor alignment/sync sizes from the
 * CPU cache line, then register the PCI driver. */
static int __init b44_init(void)
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	return pci_module_init(&b44_driver);
/* Module exit: unregister the PCI driver. */
static void __exit b44_cleanup(void)
	pci_unregister_driver(&b44_driver);
/* Module entry/exit points. */
module_init(b44_init);
module_exit(b44_cleanup);