1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.01"
33 #define DRV_MODULE_RELDATE "Jun 16, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
62 #define B44_DMA_MASK 0x3fffffff
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
/* Probe-time banner string plus module metadata and the debug-level
 * module parameter (bitmap consumed via B44_DEF_MSG_ENABLE when -1). */
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
/* NOTE: a second, duplicate MODULE_VERSION(DRV_MODULE_VERSION) invocation
 * was removed here -- MODULE_VERSION() must appear exactly once per module
 * (it defines the __modver symbol and duplicates break the build). */
/* PCI device IDs this driver claims: BCM4401 plus the B0/B1 revisions.
 * Exported via MODULE_DEVICE_TABLE so hotplug/modprobe can autoload. */
100 static struct pci_device_id b44_pci_tbl[] = {
101 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
102 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
103 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
104 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
106 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
107 { } /* terminate list with empty entry */
110 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
/* Forward declarations for routines used before their definitions. */
112 static void b44_halt(struct b44 *);
113 static void b44_init_rings(struct b44 *);
/* Reset kinds accepted by b44_init_hw(): full reset (with PHY), full
 * reset skipping the PHY, or a partial (rings-only) reset. */
115 #define B44_FULL_RESET 1
116 #define B44_FULL_RESET_SKIP_PHY 2
117 #define B44_PARTIAL_RESET 3
119 static void b44_init_hw(struct b44 *, int);
/* Descriptor alignment mask and sync size for the streaming-mapped ring
 * fallback; initialized elsewhere (not visible in this chunk). */
121 static int dma_desc_align_mask;
122 static int dma_desc_sync_size;
/* ethtool statistics name table; entries generated by the _B44() macro
 * (the initializer list itself is elided from this chunk). */
124 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
125 #define _B44(x...) # x,
/* Flush one ring descriptor's cacheline to the device.  Used when the
 * ring is a streaming mapping (the *_RING_HACK fallback).  The dma_base
 * parameter line is elided from this chunk -- presumably dma_addr_t. */
130 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
132 unsigned long offset,
133 enum dma_data_direction dir)
135 dma_sync_single_range_for_device(&pdev->dev, dma_base,
136 offset & dma_desc_align_mask,
137 dma_desc_sync_size, dir);
/* Counterpart of b44_sync_dma_desc_for_device(): make a device-written
 * descriptor visible to the CPU before reading it. */
140 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
142 unsigned long offset,
143 enum dma_data_direction dir)
145 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
146 offset & dma_desc_align_mask,
147 dma_desc_sync_size, dir);
/* Read a 32-bit MMIO register at offset 'reg' from the mapped BAR. */
150 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
152 return readl(bp->regs + reg);
/* Write a 32-bit MMIO register; note writel() may be posted -- callers
 * that need ordering read the register back afterwards. */
155 static inline void bw32(const struct b44 *bp,
156 unsigned long reg, unsigned long val)
158 writel(val, bp->regs + reg);
/* Poll register 'reg' until bit 'bit' is cleared (clear != 0) or set
 * (clear == 0), up to 'timeout' iterations.  Logs and (per the elided
 * tail) returns an error on timeout; the delay between polls and the
 * return statements are not visible in this chunk. */
161 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
162 u32 bit, unsigned long timeout, const int clear)
166 for (i = 0; i < timeout; i++) {
167 u32 val = br32(bp, reg);
169 if (clear && !(val & bit))
171 if (!clear && (val & bit))
176 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
180 (clear ? "clear" : "set"));
186 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
187 * buzz words used on this company's website :-)
189 * All of these routines must be invoked with bp->lock held and
190 * interrupts disabled.
193 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
194 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
/* Extract the backplane core revision from the SBIDHIGH register. */
196 static u32 ssb_get_core_rev(struct b44 *bp)
198 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
/* Temporarily window BAR0 onto the PCI core, route the given interrupt
 * 'cores' to PCI, enable prefetch/burst on the PCI translation, then
 * restore the original BAR0 window.  Returns (per elided tail) the
 * saved pci_rev -- TODO confirm against full source. */
201 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
203 u32 bar_orig, pci_rev, val;
205 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
206 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
207 pci_rev = ssb_get_core_rev(bp);
209 val = br32(bp, B44_SBINTVEC);
211 bw32(bp, B44_SBINTVEC, val);
213 val = br32(bp, SSB_PCI_TRANS_2);
214 val |= SSB_PCI_PREF | SSB_PCI_BURST;
215 bw32(bp, SSB_PCI_TRANS_2, val);
217 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Put the ethernet core into reset following the documented SSB
 * sequence: set REJECT, wait for REJECT to latch and BUSY to clear,
 * then assert RESET (with forced gated clock first).  Early-exits if
 * the core is already in reset.  Register reads after writes flush
 * posted MMIO writes. */
222 static void ssb_core_disable(struct b44 *bp)
224 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
227 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
228 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
229 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
230 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
231 SBTMSLOW_REJECT | SBTMSLOW_RESET));
232 br32(bp, B44_SBTMSLOW);
234 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
235 br32(bp, B44_SBTMSLOW);
/* Fully reset the core: disable it, re-enable the clock while still in
 * reset, clear latched SERR / inband-error / timeout state, then
 * release reset leaving only the clock running. */
239 static void ssb_core_reset(struct b44 *bp)
243 ssb_core_disable(bp);
244 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
245 br32(bp, B44_SBTMSLOW);
248 /* Clear SERR if set, this is a hw bug workaround. */
249 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
250 bw32(bp, B44_SBTMSHIGH, 0);
252 val = br32(bp, B44_SBIMSTATE);
253 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
254 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
256 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
257 br32(bp, B44_SBTMSLOW);
260 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
261 br32(bp, B44_SBTMSLOW);
/* Derive the core unit number from the SBADMATCH0 address-match
 * register (type field selects which base-mask applies).  The switch
 * framework and return paths are elided from this chunk. */
265 static int ssb_core_unit(struct b44 *bp)
268 u32 val = br32(bp, B44_SBADMATCH0);
271 type = val & SBADMATCH0_TYPE_MASK;
274 base = val & SBADMATCH0_BS0_MASK;
278 base = val & SBADMATCH0_BS1_MASK;
283 base = val & SBADMATCH0_BS2_MASK;
/* True when the core is out of reset, not rejecting, and clocked
 * (the comparison value is elided from this chunk). */
290 static int ssb_is_core_up(struct b44 *bp)
292 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Program one 6-byte MAC address into CAM slot 'index'.  The low 4
 * address bytes go to CAM_DATA_LO, the top 2 (plus the VALID flag) to
 * CAM_DATA_HI, then a WRITE command is issued and we poll for BUSY to
 * clear.  Caller holds bp->lock. */
296 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
300 val = ((u32) data[2]) << 24;
301 val |= ((u32) data[3]) << 16;
302 val |= ((u32) data[4]) << 8;
303 val |= ((u32) data[5]) << 0;
304 bw32(bp, B44_CAM_DATA_LO, val);
305 val = (CAM_DATA_HI_VALID |
306 (((u32) data[0]) << 8) |
307 (((u32) data[1]) << 0));
308 bw32(bp, B44_CAM_DATA_HI, val);
309 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
310 (index << CAM_CTRL_INDEX_SHIFT)));
311 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
/* Mask all chip interrupt sources (lock held by caller). */
314 static inline void __b44_disable_ints(struct b44 *bp)
316 bw32(bp, B44_IMASK, 0);
/* Disable interrupts and flush the posted IMASK write (the flushing
 * read itself is elided from this chunk). */
319 static void b44_disable_ints(struct b44 *bp)
321 __b44_disable_ints(bp);
323 /* Flush posted writes. */
/* Re-enable the interrupt sources recorded in bp->imask. */
327 static void b44_enable_ints(struct b44 *bp)
329 bw32(bp, B44_IMASK, bp->imask);
/* Read PHY register 'reg' over MDIO into *val.  Clears the MII-done
 * interrupt latch, issues the read frame, waits for completion, then
 * pulls the data out of MDIO_DATA.  Returns b44_wait_bit()'s status;
 * note *val is written even on timeout. */
332 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
336 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
337 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
338 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
339 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
340 (reg << MDIO_DATA_RA_SHIFT) |
341 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
342 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
343 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
/* Write 'val' to PHY register 'reg' over MDIO; mirror of b44_readphy().
 * Returns 0 on completion, error from b44_wait_bit() on timeout. */
348 static int b44_writephy(struct b44 *bp, int reg, u32 val)
350 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
351 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
352 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
353 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
354 (reg << MDIO_DATA_RA_SHIFT) |
355 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
356 (val & MDIO_DATA_DATA)));
357 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
360 /* miilib interface */
361 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
362 * due to code existing before miilib use was added to this driver.
363 * Someone should remove this artificial driver limitation in
364 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
/* miilib read hook; phy_id is ignored (see FIXME above) and the
 * value/return plumbing is elided from this chunk. */
366 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
369 struct b44 *bp = netdev_priv(dev);
370 int rc = b44_readphy(bp, location, &val);
/* miilib write hook; phy_id is ignored, errors are discarded. */
376 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
379 struct b44 *bp = netdev_priv(dev);
380 b44_writephy(bp, location, val);
/* Reset the PHY via BMCR and verify the self-clearing RESET bit has
 * dropped (the delay/retry between write and read is elided). */
383 static int b44_phy_reset(struct b44 *bp)
388 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
392 err = b44_readphy(bp, MII_BMCR, &val);
394 if (val & BMCR_RESET) {
395 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Apply the given TX/RX pause flags to the hardware: RXCONFIG_FLOW for
 * receive pause, MAC_FLOW_PAUSE_ENAB (with a 0xc0 RX high-water mark)
 * for transmit pause.  Also records the flags in bp->flags.  Caller
 * holds bp->lock. */
404 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
408 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
409 bp->flags |= pause_flags;
411 val = br32(bp, B44_RXCONFIG);
412 if (pause_flags & B44_FLAG_RX_PAUSE)
413 val |= RXCONFIG_FLOW;
415 val &= ~RXCONFIG_FLOW;
416 bw32(bp, B44_RXCONFIG, val);
418 val = br32(bp, B44_MAC_FLOW);
419 if (pause_flags & B44_FLAG_TX_PAUSE)
420 val |= (MAC_FLOW_PAUSE_ENAB |
421 (0xc0 & MAC_FLOW_RX_HI_WATER));
423 val &= ~MAC_FLOW_PAUSE_ENAB;
424 bw32(bp, B44_MAC_FLOW, val);
/* Resolve autoneg pause results: enable RX pause only, and only when we
 * advertised sym+asym pause and the partner advertised asym-only.  TX
 * pause is deliberately left off (excessive pause frames); use ethtool
 * to force it. */
427 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
431 /* The driver supports only rx pause by default because
432 the b44 mac tx pause mechanism generates excessive
434 Use ethtool to turn on b44 tx pause if necessary.
436 if ((local & ADVERTISE_PAUSE_CAP) &&
437 (local & ADVERTISE_PAUSE_ASYM)){
438 if ((remote & LPA_PAUSE_ASYM) &&
439 !(remote & LPA_PAUSE_CAP))
440 pause_enab |= B44_FLAG_RX_PAUSE;
443 __b44_set_flow_ctrl(bp, pause_enab);
/* Configure the PHY: fix up LED control registers, then either start
 * autonegotiation with an advertisement mask built from bp->flags, or
 * (forced-link mode) program BMCR speed/duplex directly and disable
 * flow control since it cannot be negotiated.  Returns 0 or the first
 * PHY access error. */
446 static int b44_setup_phy(struct b44 *bp)
451 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
453 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
454 val & MII_ALEDCTRL_ALLMSK)) != 0)
456 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
458 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
459 val | MII_TLEDCTRL_ENABLE)) != 0)
462 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
463 u32 adv = ADVERTISE_CSMA;
465 if (bp->flags & B44_FLAG_ADV_10HALF)
466 adv |= ADVERTISE_10HALF;
467 if (bp->flags & B44_FLAG_ADV_10FULL)
468 adv |= ADVERTISE_10FULL;
469 if (bp->flags & B44_FLAG_ADV_100HALF)
470 adv |= ADVERTISE_100HALF;
471 if (bp->flags & B44_FLAG_ADV_100FULL)
472 adv |= ADVERTISE_100FULL;
474 if (bp->flags & B44_FLAG_PAUSE_AUTO)
475 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
477 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
479 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
480 BMCR_ANRESTART))) != 0)
485 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
487 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
488 if (bp->flags & B44_FLAG_100_BASE_T)
489 bmcr |= BMCR_SPEED100;
490 if (bp->flags & B44_FLAG_FULL_DUPLEX)
491 bmcr |= BMCR_FULLDPLX;
492 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
495 /* Since we will not be negotiating there is no safe way
496 * to determine if the link partner supports flow control
497 * or not. So just disable it completely in this case.
499 b44_set_flow_ctrl(bp, 0, 0);
/* Accumulate hardware MIB counters into bp->hw_stats.  Relies on the
 * TX and RX counter registers being laid out contiguously, walked with
 * a u64 pointer in struct-field order (the RX base-pointer assignment
 * between the two loops is elided from this chunk). */
506 static void b44_stats_update(struct b44 *bp)
511 val = &bp->hw_stats.tx_good_octets;
512 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
513 *val++ += br32(bp, reg);
519 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
520 *val++ += br32(bp, reg);
/* Log the current carrier state; on link-up also logs speed, duplex
 * and pause configuration derived from bp->flags. */
524 static void b44_link_report(struct b44 *bp)
526 if (!netif_carrier_ok(bp->dev)) {
527 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
529 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
531 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
532 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
534 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
537 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
538 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
/* Periodic (timer-driven) link supervision: read BMSR/AUXCTRL, refresh
 * the speed/duplex flags, and on a link transition program TX_CTRL
 * duplex, re-resolve flow control from the negotiated advertisements,
 * and toggle the carrier state (b44_link_report calls elided).  Also
 * warns about remote-fault and jabber conditions. */
542 static void b44_check_phy(struct b44 *bp)
546 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
547 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
549 if (aux & MII_AUXCTRL_SPEED)
550 bp->flags |= B44_FLAG_100_BASE_T;
552 bp->flags &= ~B44_FLAG_100_BASE_T;
553 if (aux & MII_AUXCTRL_DUPLEX)
554 bp->flags |= B44_FLAG_FULL_DUPLEX;
556 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
558 if (!netif_carrier_ok(bp->dev) &&
559 (bmsr & BMSR_LSTATUS)) {
560 u32 val = br32(bp, B44_TX_CTRL);
561 u32 local_adv, remote_adv;
563 if (bp->flags & B44_FLAG_FULL_DUPLEX)
564 val |= TX_CTRL_DUPLEX;
566 val &= ~TX_CTRL_DUPLEX;
567 bw32(bp, B44_TX_CTRL, val);
569 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
570 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
571 !b44_readphy(bp, MII_LPA, &remote_adv))
572 b44_set_flow_ctrl(bp, local_adv, remote_adv);
575 netif_carrier_on(bp->dev);
577 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
579 netif_carrier_off(bp->dev);
583 if (bmsr & BMSR_RFAULT)
584 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
587 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* One-second housekeeping timer: checks PHY state and folds in MIB
 * counters under bp->lock, then re-arms itself. */
592 static void b44_timer(unsigned long __opaque)
594 struct b44 *bp = (struct b44 *) __opaque;
596 spin_lock_irq(&bp->lock);
600 b44_stats_update(bp);
602 spin_unlock_irq(&bp->lock);
604 bp->timer.expires = jiffies + HZ;
605 add_timer(&bp->timer);
/* TX completion: read the hardware's current descriptor index from
 * DMATX_STAT, unmap and free every skb from tx_cons up to it, then
 * wake the queue if enough descriptors freed up.  Clearing GPTIMER
 * acknowledges the TX-timeout watchdog. */
608 static void b44_tx(struct b44 *bp)
612 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
613 cur /= sizeof(struct dma_desc);
615 /* XXX needs updating when NETIF_F_SG is supported */
616 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
617 struct ring_info *rp = &bp->tx_buffers[cons];
618 struct sk_buff *skb = rp->skb;
622 pci_unmap_single(bp->pdev,
623 pci_unmap_addr(rp, mapping),
627 dev_kfree_skb_irq(skb);
631 if (netif_queue_stopped(bp->dev) &&
632 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
633 netif_wake_queue(bp->dev);
635 bw32(bp, B44_GPTIMER, 0);
638 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
639 * before the DMA address you give it. So we allocate 30 more bytes
640 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
641 * point the chip at 30 bytes past where the rx_header will go.
/* Allocate and map a fresh RX skb for ring slot dest_idx_unmasked.
 * Because the chip cannot DMA above 1GB (B44_DMA_MASK), a mapping that
 * lands too high is retried once with a GFP_DMA allocation.  The skb is
 * reserved bp->rx_offset bytes so the chip's rx_header lands in front
 * of the packet data.  Returns the buffer size on success. */
643 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
646 struct ring_info *src_map, *map;
647 struct rx_header *rh;
655 src_map = &bp->rx_buffers[src_idx];
656 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
657 map = &bp->rx_buffers[dest_idx];
658 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
662 mapping = pci_map_single(bp->pdev, skb->data,
666 /* Hardware bug work-around, the chip is unable to do PCI DMA
667 to/from anything above 1GB :-( */
668 if (dma_mapping_error(mapping) ||
669 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
671 if (!dma_mapping_error(mapping))
672 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
673 dev_kfree_skb_any(skb);
674 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
677 mapping = pci_map_single(bp->pdev, skb->data,
680 if (dma_mapping_error(mapping) ||
681 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
682 if (!dma_mapping_error(mapping))
683 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
684 dev_kfree_skb_any(skb);
690 skb_reserve(skb, bp->rx_offset);
692 rh = (struct rx_header *)
693 (skb->data - bp->rx_offset);
698 pci_unmap_addr_set(map, mapping, mapping);
703 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
704 if (dest_idx == (B44_RX_RING_SIZE - 1))
705 ctrl |= DESC_CTRL_EOT;
707 dp = &bp->rx_ring[dest_idx];
708 dp->ctrl = cpu_to_le32(ctrl);
709 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
711 if (bp->flags & B44_FLAG_RX_RING_HACK)
712 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
/* NOTE(review): sizeof(dp) is the size of a *pointer*, not of the
 * descriptor; the sync offset looks like it should use sizeof(*dp)
 * (struct dma_desc) -- confirm against mainline fix. */
713 dest_idx * sizeof(dp),
716 return RX_PKT_BUF_SZ;
/* Recycle an errored/copied RX buffer: move the skb and its DMA mapping
 * from ring slot src_idx to dest_idx without reallocating, fixing up
 * the EOT bit for the last ring entry, then sync descriptor and buffer
 * back to the device. */
719 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
721 struct dma_desc *src_desc, *dest_desc;
722 struct ring_info *src_map, *dest_map;
723 struct rx_header *rh;
727 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
728 dest_desc = &bp->rx_ring[dest_idx];
729 dest_map = &bp->rx_buffers[dest_idx];
730 src_desc = &bp->rx_ring[src_idx];
731 src_map = &bp->rx_buffers[src_idx];
733 dest_map->skb = src_map->skb;
734 rh = (struct rx_header *) src_map->skb->data;
737 pci_unmap_addr_set(dest_map, mapping,
738 pci_unmap_addr(src_map, mapping));
740 if (bp->flags & B44_FLAG_RX_RING_HACK)
741 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
/* NOTE(review): sizeof(src_desc)/sizeof(dest_desc) below are pointer
 * sizes; the descriptor stride is sizeof(struct dma_desc) -- likely a
 * bug, confirm against mainline. */
742 src_idx * sizeof(src_desc),
745 ctrl = src_desc->ctrl;
746 if (dest_idx == (B44_RX_RING_SIZE - 1))
747 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
749 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
751 dest_desc->ctrl = ctrl;
752 dest_desc->addr = src_desc->addr;
756 if (bp->flags & B44_FLAG_RX_RING_HACK)
757 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
758 dest_idx * sizeof(dest_desc),
761 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
/* NAPI RX path: walk completed descriptors up to the hardware index.
 * Bad frames are recycled; large frames are handed up whole after a
 * replacement buffer is allocated; small frames (<= RX_COPY_THRESHOLD)
 * are copied into a fresh skb and the original buffer recycled.
 * Returns the number of packets processed (<= budget). */
766 static int b44_rx(struct b44 *bp, int budget)
772 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
773 prod /= sizeof(struct dma_desc);
776 while (cons != prod && budget > 0) {
777 struct ring_info *rp = &bp->rx_buffers[cons];
778 struct sk_buff *skb = rp->skb;
779 dma_addr_t map = pci_unmap_addr(rp, mapping);
780 struct rx_header *rh;
783 pci_dma_sync_single_for_cpu(bp->pdev, map,
786 rh = (struct rx_header *) skb->data;
/* NOTE(review): rh->len was written by the device in little-endian, so
 * le16_to_cpu() reads more naturally here; cpu_to_le16() produces the
 * same bytes on fixed-endian builds but is semantically reversed. */
787 len = cpu_to_le16(rh->len);
788 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
789 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
791 b44_recycle_rx(bp, cons, bp->rx_prod);
793 bp->stats.rx_dropped++;
/* Re-poll the length a few times: the chip may still be writing it. */
803 len = cpu_to_le16(rh->len);
804 } while (len == 0 && i++ < 5);
812 if (len > RX_COPY_THRESHOLD) {
814 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
817 pci_unmap_single(bp->pdev, map,
818 skb_size, PCI_DMA_FROMDEVICE);
819 /* Leave out rx_header */
820 skb_put(skb, len+bp->rx_offset);
821 skb_pull(skb,bp->rx_offset);
823 struct sk_buff *copy_skb;
825 b44_recycle_rx(bp, cons, bp->rx_prod);
826 copy_skb = dev_alloc_skb(len + 2);
827 if (copy_skb == NULL)
828 goto drop_it_no_recycle;
830 copy_skb->dev = bp->dev;
831 skb_reserve(copy_skb, 2);
832 skb_put(copy_skb, len);
833 /* DMA sync done above, copy just the actual packet */
834 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
838 skb->ip_summed = CHECKSUM_NONE;
839 skb->protocol = eth_type_trans(skb, bp->dev);
840 netif_receive_skb(skb);
841 bp->dev->last_rx = jiffies;
845 bp->rx_prod = (bp->rx_prod + 1) &
846 (B44_RX_RING_SIZE - 1);
847 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
851 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
/* Old-style NAPI poll: handle TX completions and RX work recorded in
 * bp->istat, capped by *budget and the device quota; on fatal error
 * bits, halt/reinit the hardware.  Returns 0 when done (poll removed
 * from the list), 1 to be polled again. */
856 static int b44_poll(struct net_device *netdev, int *budget)
858 struct b44 *bp = netdev_priv(netdev);
861 spin_lock_irq(&bp->lock);
863 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
864 /* spin_lock(&bp->tx_lock); */
866 /* spin_unlock(&bp->tx_lock); */
868 spin_unlock_irq(&bp->lock);
871 if (bp->istat & ISTAT_RX) {
872 int orig_budget = *budget;
875 if (orig_budget > netdev->quota)
876 orig_budget = netdev->quota;
878 work_done = b44_rx(bp, orig_budget);
880 *budget -= work_done;
881 netdev->quota -= work_done;
883 if (work_done >= orig_budget)
887 if (bp->istat & ISTAT_ERRORS) {
890 spin_lock_irqsave(&bp->lock, flags);
893 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
894 netif_wake_queue(bp->dev);
895 spin_unlock_irqrestore(&bp->lock, flags);
900 netif_rx_complete(netdev);
904 return (done ? 0 : 1);
/* Shared IRQ handler: latch ISTAT & IMASK, schedule NAPI with chip
 * interrupts masked, and ack the status.  The ISTAT write is flushed by
 * a readback (elided) so the deassertion reaches the chip before we
 * return IRQ_RETVAL(handled). */
907 static irqreturn_t b44_interrupt(int irq, void *dev_id)
909 struct net_device *dev = dev_id;
910 struct b44 *bp = netdev_priv(dev);
914 spin_lock(&bp->lock);
916 istat = br32(bp, B44_ISTAT);
917 imask = br32(bp, B44_IMASK);
919 /* The interrupt mask register controls which interrupt bits
920 * will actually raise an interrupt to the CPU when set by hw/firmware,
921 * but doesn't mask off the bits.
927 if (unlikely(!netif_running(dev))) {
928 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
932 if (netif_rx_schedule_prep(dev)) {
933 /* NOTE: These writes are posted by the readback of
934 * the ISTAT register below.
937 __b44_disable_ints(bp);
938 __netif_rx_schedule(dev);
940 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
945 bw32(bp, B44_ISTAT, istat);
948 spin_unlock(&bp->lock);
949 return IRQ_RETVAL(handled);
/* Watchdog hook: the TX path stalled, so halt and fully reinitialize
 * the chip under bp->lock, then restart the queue. */
952 static void b44_tx_timeout(struct net_device *dev)
954 struct b44 *bp = netdev_priv(dev);
956 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
959 spin_lock_irq(&bp->lock);
963 b44_init_hw(bp, B44_FULL_RESET);
965 spin_unlock_irq(&bp->lock);
969 netif_wake_queue(dev);
/* hard_start_xmit: map the skb for DMA; if the mapping violates the
 * chip's 1GB DMA limit, bounce the payload into a GFP_DMA|GFP_ATOMIC
 * skb instead.  Fill the next TX descriptor (EOT on the last ring
 * slot), advance tx_prod, and kick DMATX_PTR -- twice for chips with
 * the buggy-TXPTR erratum, with a readback for reorder-bug chips.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY on the (bug) full-ring path. */
972 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
974 struct b44 *bp = netdev_priv(dev);
975 struct sk_buff *bounce_skb;
976 int rc = NETDEV_TX_OK;
978 u32 len, entry, ctrl;
981 spin_lock_irq(&bp->lock);
983 /* This is a hard error, log it. */
984 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
985 netif_stop_queue(dev);
986 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
991 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
992 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
993 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
994 if (!dma_mapping_error(mapping))
995 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
997 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
1002 mapping = pci_map_single(bp->pdev, bounce_skb->data,
1003 len, PCI_DMA_TODEVICE);
1004 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
1005 if (!dma_mapping_error(mapping))
1006 pci_unmap_single(bp->pdev, mapping,
1007 len, PCI_DMA_TODEVICE);
1008 dev_kfree_skb_any(bounce_skb);
1012 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
1013 dev_kfree_skb_any(skb);
1017 entry = bp->tx_prod;
1018 bp->tx_buffers[entry].skb = skb;
1019 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1021 ctrl = (len & DESC_CTRL_LEN);
1022 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1023 if (entry == (B44_TX_RING_SIZE - 1))
1024 ctrl |= DESC_CTRL_EOT;
1026 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1027 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1029 if (bp->flags & B44_FLAG_TX_RING_HACK)
1030 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1031 entry * sizeof(bp->tx_ring[0]),
1034 entry = NEXT_TX(entry);
1036 bp->tx_prod = entry;
1040 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1041 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1042 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1043 if (bp->flags & B44_FLAG_REORDER_BUG)
1044 br32(bp, B44_DMATX_PTR);
1046 if (TX_BUFFS_AVAIL(bp) < 1)
1047 netif_stop_queue(dev);
1049 dev->trans_start = jiffies;
1052 spin_unlock_irq(&bp->lock);
1057 rc = NETDEV_TX_BUSY;
/* Change the MTU within [B44_MIN_MTU, B44_MAX_MTU].  If the interface
 * is down the new value simply takes effect at open; otherwise the
 * chip is halted and re-initialized under bp->lock with the new size. */
1061 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1063 struct b44 *bp = netdev_priv(dev);
1065 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1068 if (!netif_running(dev)) {
1069 /* We'll just catch it later when the
1076 spin_lock_irq(&bp->lock);
1080 b44_init_hw(bp, B44_FULL_RESET);
1081 spin_unlock_irq(&bp->lock);
1083 b44_enable_ints(bp);
1088 /* Free up pending packets in all rx/tx rings.
1090 * The chip has been shut down and the driver detached from
1091 * the networking, so no interrupts or new tx packets will
1092 * end up in the driver. bp->lock is not held and we are not
1093 * in an interrupt context and thus may sleep.
/* Release every skb still held in the RX and TX rings, unmapping their
 * DMA buffers.  Per the header comment above, the chip is stopped and
 * no lock is held, so dev_kfree_skb_any() is safe here. */
1095 static void b44_free_rings(struct b44 *bp)
1097 struct ring_info *rp;
1100 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1101 rp = &bp->rx_buffers[i];
1103 if (rp->skb == NULL)
1105 pci_unmap_single(bp->pdev,
1106 pci_unmap_addr(rp, mapping),
1108 PCI_DMA_FROMDEVICE);
1109 dev_kfree_skb_any(rp->skb);
1113 /* XXX needs changes once NETIF_F_SG is set... */
1114 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1115 rp = &bp->tx_buffers[i];
1117 if (rp->skb == NULL)
1119 pci_unmap_single(bp->pdev,
1120 pci_unmap_addr(rp, mapping),
1123 dev_kfree_skb_any(rp->skb);
1128 /* Initialize tx/rx rings for packet processing.
1130 * The chip has been shut down and the driver detached from
1131 * the networking, so no interrupts or new tx packets will
1132 * end up in the driver.
/* Zero both descriptor rings (syncing them to the device when they are
 * streaming-mapped RING_HACK allocations) and repopulate rx_pending
 * RX slots with fresh buffers. */
1134 static void b44_init_rings(struct b44 *bp)
1140 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1141 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1143 if (bp->flags & B44_FLAG_RX_RING_HACK)
1144 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1146 PCI_DMA_BIDIRECTIONAL);
1148 if (bp->flags & B44_FLAG_TX_RING_HACK)
1149 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1153 for (i = 0; i < bp->rx_pending; i++) {
1154 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1160 * Must not be invoked with interrupt sources disabled and
1161 * the hardware shutdown down.
/* Tear down ring memory: free the ring_info arrays, then release each
 * descriptor ring either via dma_unmap_single+kfree (RING_HACK
 * streaming mappings) or pci_free_consistent (coherent allocations). */
1163 static void b44_free_consistent(struct b44 *bp)
1165 kfree(bp->rx_buffers);
1166 bp->rx_buffers = NULL;
1167 kfree(bp->tx_buffers);
1168 bp->tx_buffers = NULL;
1170 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1171 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1176 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1177 bp->rx_ring, bp->rx_ring_dma);
1179 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1182 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1183 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1188 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1189 bp->tx_ring, bp->tx_ring_dma);
1191 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1196 * Must not be invoked with interrupt sources disabled and
1197 * the hardware shutdown down. Can sleep.
/* Allocate ring_info arrays and both descriptor rings.  If coherent
 * allocation fails (pci_alloc_consistent may insist on GFP_DMA), fall
 * back to kzalloc + dma_map_single and mark the ring with the
 * *_RING_HACK flag so the sync helpers are used; the fallback is
 * rejected if its mapping crosses the 1GB B44_DMA_MASK limit.  On any
 * failure everything already allocated is released via
 * b44_free_consistent(). */
1199 static int b44_alloc_consistent(struct b44 *bp)
1203 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1204 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1205 if (!bp->rx_buffers)
1208 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1209 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1210 if (!bp->tx_buffers)
1213 size = DMA_TABLE_BYTES;
1214 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1216 /* Allocation may have failed due to pci_alloc_consistent
1217 insisting on use of GFP_DMA, which is more restrictive
1218 than necessary... */
1219 struct dma_desc *rx_ring;
1220 dma_addr_t rx_ring_dma;
1222 rx_ring = kzalloc(size, GFP_KERNEL);
1226 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1230 if (dma_mapping_error(rx_ring_dma) ||
1231 rx_ring_dma + size > B44_DMA_MASK) {
1236 bp->rx_ring = rx_ring;
1237 bp->rx_ring_dma = rx_ring_dma;
1238 bp->flags |= B44_FLAG_RX_RING_HACK;
1241 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1243 /* Allocation may have failed due to pci_alloc_consistent
1244 insisting on use of GFP_DMA, which is more restrictive
1245 than necessary... */
1246 struct dma_desc *tx_ring;
1247 dma_addr_t tx_ring_dma;
1249 tx_ring = kzalloc(size, GFP_KERNEL);
1253 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1257 if (dma_mapping_error(tx_ring_dma) ||
1258 tx_ring_dma + size > B44_DMA_MASK) {
1263 bp->tx_ring = tx_ring;
1264 bp->tx_ring_dma = tx_ring_dma;
1265 bp->flags |= B44_FLAG_TX_RING_HACK;
1271 b44_free_consistent(bp);
1275 /* bp->lock is held. */
1276 static void b44_clear_stats(struct b44 *bp)
1280 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1281 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1283 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1287 /* bp->lock is held. */
1288 static void b44_chip_reset(struct b44 *bp)
1290 if (ssb_is_core_up(bp)) {
1291 bw32(bp, B44_RCV_LAZY, 0);
1292 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1293 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1294 bw32(bp, B44_DMATX_CTRL, 0);
1295 bp->tx_prod = bp->tx_cons = 0;
1296 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1297 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1300 bw32(bp, B44_DMARX_CTRL, 0);
1301 bp->rx_prod = bp->rx_cons = 0;
1303 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1310 b44_clear_stats(bp);
1312 /* Make PHY accessible. */
1313 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1314 (0x0d & MDIO_CTRL_MAXF_MASK)));
1315 br32(bp, B44_MDIO_CTRL);
1317 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1318 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1319 br32(bp, B44_ENET_CTRL);
1320 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1322 u32 val = br32(bp, B44_DEVCTRL);
1324 if (val & DEVCTRL_EPR) {
1325 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1326 br32(bp, B44_DEVCTRL);
1329 bp->flags |= B44_FLAG_INTERNAL_PHY;
1333 /* bp->lock is held. */
/* Stop the chip: mask interrupts (further shutdown steps elided). */
1334 static void b44_halt(struct b44 *bp)
1336 b44_disable_ints(bp);
1340 /* bp->lock is held. */
/* Program the device MAC address into CAM slot 0 and enable the CAM,
 * unless the interface is in promiscuous mode (CAM left disabled). */
1341 static void __b44_set_mac_addr(struct b44 *bp)
1343 bw32(bp, B44_CAM_CTRL, 0);
1344 if (!(bp->dev->flags & IFF_PROMISC)) {
1347 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1348 val = br32(bp, B44_CAM_CTRL);
1349 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* ndo_set_mac_address: validate the new address, copy it into the
 * netdev, and reprogram the CAM under bp->lock.  The early-return
 * conditions' bodies (running check / invalid address) are elided. */
1353 static int b44_set_mac_addr(struct net_device *dev, void *p)
1355 struct b44 *bp = netdev_priv(dev);
1356 struct sockaddr *addr = p;
1358 if (netif_running(dev))
1361 if (!is_valid_ether_addr(addr->sa_data))
1364 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1366 spin_lock_irq(&bp->lock);
1367 __b44_set_mac_addr(bp);
1368 spin_unlock_irq(&bp->lock);
1373 /* Called at device open time to get the chip ready for
1374 * packet processing. Invoked with bp->lock held.
1376 static void __b44_set_rx_mode(struct net_device *);
/* Bring the chip up after a reset of the given kind: program MAC
 * control/lazy-RX, install the RX filter (which also sets the MAC
 * address), set max frame lengths (MTU + header + VLAN + rx_header),
 * enable the DMA engines -- TX ring setup is skipped on a PARTIAL
 * reset -- prime DMARX_PTR, and finally enable the MAC. */
1377 static void b44_init_hw(struct b44 *bp, int reset_kind)
1382 if (reset_kind == B44_FULL_RESET) {
1387 /* Enable CRC32, set proper LED modes and power on PHY */
1388 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1389 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1391 /* This sets the MAC address too. */
1392 __b44_set_rx_mode(bp->dev);
1394 /* MTU + eth header + possible VLAN tag + struct rx_header */
1395 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1396 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1398 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1399 if (reset_kind == B44_PARTIAL_RESET) {
1400 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1401 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1403 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1404 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1405 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1406 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1407 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1409 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1410 bp->rx_prod = bp->rx_pending;
1412 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1415 val = br32(bp, B44_ENET_CTRL);
1416 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
/* ndo_open: allocate ring memory, init rings and hardware, grab the
 * (shared) IRQ -- backing everything out on failure -- start the
 * one-second b44_timer, then enable interrupts and the TX queue. */
1419 static int b44_open(struct net_device *dev)
1421 struct b44 *bp = netdev_priv(dev);
1424 err = b44_alloc_consistent(bp);
1429 b44_init_hw(bp, B44_FULL_RESET);
1433 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1434 if (unlikely(err < 0)) {
1437 b44_free_consistent(bp);
1441 init_timer(&bp->timer);
1442 bp->timer.expires = jiffies + HZ;
1443 bp->timer.data = (unsigned long) bp;
1444 bp->timer.function = b44_timer;
1445 add_timer(&bp->timer);
1447 b44_enable_ints(bp);
1448 netif_start_queue(dev);
/* Debug helper: dumps PCI status (and, in elided lines, more chip
 * registers) to the console.  Deliberately non-static for use from
 * a debugger or other objects. */
1454 /*static*/ void b44_dump_state(struct b44 *bp)
1456 u32 val32, val32_2, val32_3, val32_4, val32_5;
1459 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1460 printk("DEBUG: PCI status [%04x] \n", val16);
1465 #ifdef CONFIG_NET_POLL_CONTROLLER
1467 * Polling receive - used by netconsole and other diagnostic tools
1468 * to allow network i/o with interrupts disabled.
/* Runs the normal interrupt handler by hand with the IRQ line masked. */
1470 static void b44_poll_controller(struct net_device *dev)
1472 disable_irq(dev->irq);
1473 b44_interrupt(dev->irq, dev);
1474 enable_irq(dev->irq);
/* Write `bytes` bytes of pattern/mask data into the chip's WOL filter
 * table, one 32-bit word at a time, starting at table_offset.  Each word
 * is written through the FILT_ADDR/FILT_DATA register pair. */
1478 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1481 u32 *pattern = (u32 *) pp;
1483 for (i = 0; i < bytes; i += sizeof(u32)) {
1484 bw32(bp, B44_FILT_ADDR, table_offset + i);
1485 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
/* Build a magic-packet match pattern at `offset` within ppattern:
 * `magicsync` bytes of 0xff followed by the MAC address repeated, with
 * the corresponding bits set in pmask.  Returns (via `len`, in elided
 * lines) the pattern length used to program B44_WKUP_LEN. */
1489 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1492 int k, j, len = offset;
1493 int ethaddr_bytes = ETH_ALEN;
1495 memset(ppattern + offset, 0xff, magicsync);
/* NOTE(review): set_bit() on a u8 array cast to unsigned long * assumes
 * the mask buffer is suitably aligned and little-endian bit layout is
 * acceptable here -- confirm on big-endian targets. */
1496 for (j = 0; j < magicsync; j++)
1497 set_bit(len++, (unsigned long *) pmask);
1499 for (j = 0; j < B44_MAX_PATTERNS; j++) {
/* Clamp the last MAC repetition if it would overrun the pattern buffer. */
1500 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1501 ethaddr_bytes = ETH_ALEN;
1503 ethaddr_bytes = B44_PATTERN_SIZE - len;
1504 if (ethaddr_bytes <=0)
1506 for (k = 0; k< ethaddr_bytes; k++) {
1507 ppattern[offset + magicsync +
1508 (j * ETH_ALEN) + k] = macaddr[k];
1510 set_bit(len, (unsigned long *) pmask);
1516 /* Setup magic packet patterns in the b44 WOL
1517 * pattern matching filter.
/* Programs three magic-packet patterns (IPv4/UDP, raw Ethernet II,
 * IPv6/UDP offsets), writes their lengths to B44_WKUP_LEN and enables
 * pattern-match wakeup in DEVCTRL. */
1519 static void b44_setup_pseudo_magicp(struct b44 *bp)
1523 int plen0, plen1, plen2;
1525 u8 pwol_mask[B44_PMASK_SIZE];
1527 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1528 if (!pwol_pattern) {
1529 printk(KERN_ERR PFX "Memory not available for WOL\n");
1533 /* Ipv4 magic packet pattern - pattern 0.*/
1534 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1535 memset(pwol_mask, 0, B44_PMASK_SIZE);
1536 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1537 B44_ETHIPV4UDP_HLEN);
1539 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1540 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1542 /* Raw ethernet II magic packet pattern - pattern 1 */
1543 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1544 memset(pwol_mask, 0, B44_PMASK_SIZE);
1545 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
/* Pattern 1 lives one pattern/mask slot above pattern 0 in the table. */
1548 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1549 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1550 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1551 B44_PMASK_BASE + B44_PMASK_SIZE);
1553 /* Ipv6 magic packet pattern - pattern 2 */
1554 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1555 memset(pwol_mask, 0, B44_PMASK_SIZE);
1556 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1557 B44_ETHIPV6UDP_HLEN);
1559 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1560 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE)
1561 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1562 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1564 kfree(pwol_pattern);
1566 /* set these pattern's lengths: one less than each real length */
1567 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1568 bw32(bp, B44_WKUP_LEN, val);
1570 /* enable wakeup pattern matching */
1571 val = br32(bp, B44_DEVCTRL);
1572 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
/* Arm wake-on-LAN before power-down.  B0-and-later parts have hardware
 * magic-packet match (MPM): program the station address and set
 * DEVCTRL_MPM|PFE.  Older parts fall back to the pseudo-magic pattern
 * filter.  Finally enable the SSB core PME and the PCI PM PME bit. */
1576 static void b44_setup_wol(struct b44 *bp)
/* Accept all multicast while suspended so wake frames are not filtered. */
1581 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1583 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1585 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* Station address, low 4 bytes then high 2 bytes, big-endian packing. */
1587 val = bp->dev->dev_addr[2] << 24 |
1588 bp->dev->dev_addr[3] << 16 |
1589 bp->dev->dev_addr[4] << 8 |
1590 bp->dev->dev_addr[5];
1591 bw32(bp, B44_ADDR_LO, val);
1593 val = bp->dev->dev_addr[0] << 8 |
1594 bp->dev->dev_addr[1];
1595 bw32(bp, B44_ADDR_HI, val);
1597 val = br32(bp, B44_DEVCTRL);
1598 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
/* Pre-B0 silicon: use the software-built pattern filters instead. */
1601 b44_setup_pseudo_magicp(bp);
/* Enable PME generation at the SSB core ... */
1604 val = br32(bp, B44_SBTMSLOW);
1605 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
/* ... and in the PCI power-management control/status register. */
1607 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1608 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/* net_device->stop: quiesce the queue and NAPI poll, kill the PHY timer,
 * shut the chip down under the lock, release the IRQ, then either arm
 * WOL (partial re-init) or fully free the DMA rings. */
1612 static int b44_close(struct net_device *dev)
1614 struct b44 *bp = netdev_priv(dev);
1616 netif_stop_queue(dev);
1618 netif_poll_disable(dev);
/* Timer must be dead before the hardware is halted below. */
1620 del_timer_sync(&bp->timer);
1622 spin_lock_irq(&bp->lock);
1629 netif_carrier_off(dev);
1631 spin_unlock_irq(&bp->lock);
1633 free_irq(dev->irq, dev);
/* Re-enable poll state so the next open() starts clean. */
1635 netif_poll_enable(dev);
1637 if (bp->flags & B44_FLAG_WOL_ENABLE) {
/* Keep RX DMA alive for pattern-match wakeup. */
1638 b44_init_hw(bp, B44_PARTIAL_RESET);
1642 b44_free_consistent(bp);
/* net_device->get_stats: translate the chip's accumulated MIB counters
 * (bp->hw_stats) into the generic net_device_stats layout. */
1647 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1649 struct b44 *bp = netdev_priv(dev);
1650 struct net_device_stats *nstat = &bp->stats;
1651 struct b44_hw_stats *hwstat = &bp->hw_stats;
1653 /* Convert HW stats into netdevice stats. */
1654 nstat->rx_packets = hwstat->rx_pkts;
1655 nstat->tx_packets = hwstat->tx_pkts;
1656 nstat->rx_bytes = hwstat->rx_octets;
1657 nstat->tx_bytes = hwstat->tx_octets;
1658 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1659 hwstat->tx_oversize_pkts +
1660 hwstat->tx_underruns +
1661 hwstat->tx_excessive_cols +
1662 hwstat->tx_late_cols);
/* NOTE(review): nstat->multicast is fed from the TX multicast counter;
 * the generic field normally counts received multicasts -- confirm
 * whether an rx_multicast_pkts MIB counter should be used instead. */
1663 nstat->multicast = hwstat->tx_multicast_pkts;
1664 nstat->collisions = hwstat->tx_total_cols;
1666 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1667 hwstat->rx_undersize);
1668 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1669 nstat->rx_frame_errors = hwstat->rx_align_errs;
1670 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1671 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1672 hwstat->rx_oversize_pkts +
1673 hwstat->rx_missed_pkts +
1674 hwstat->rx_crc_align_errs +
1675 hwstat->rx_undersize +
1676 hwstat->rx_crc_errs +
1677 hwstat->rx_align_errs +
1678 hwstat->rx_symbol_errs);
1680 nstat->tx_aborted_errors = hwstat->tx_underruns;
1682 /* Carrier lost counter seems to be broken for some devices */
1683 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into the CAM,
 * starting at CAM index 1 (index 0 holds the station address).
 * Caller holds bp->lock. */
1689 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1691 struct dev_mc_list *mclist;
1694 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1695 mclist = dev->mc_list;
1696 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1697 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
/* Program the RX filter from dev->flags: promiscuous bypasses the CAM;
 * otherwise reload the station address, then either set ALLMULTI or
 * load the multicast list (zeroing the remaining CAM slots), and
 * re-enable the CAM.  Caller holds bp->lock. */
1702 static void __b44_set_rx_mode(struct net_device *dev)
1704 struct b44 *bp = netdev_priv(dev);
1707 val = br32(bp, B44_RXCONFIG);
1708 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1709 if (dev->flags & IFF_PROMISC) {
1710 val |= RXCONFIG_PROMISC;
1711 bw32(bp, B44_RXCONFIG, val);
/* Non-promiscuous path: zero[] clears unused CAM entries below. */
1713 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1716 __b44_set_mac_addr(bp);
1718 if ((dev->flags & IFF_ALLMULTI) ||
1719 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1720 val |= RXCONFIG_ALLMULTI;
1722 i = __b44_load_mcast(bp, dev);
1725 __b44_cam_write(bp, zero, i);
1727 bw32(bp, B44_RXCONFIG, val);
1728 val = br32(bp, B44_CAM_CTRL);
1729 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* Locked wrapper used as net_device->set_multicast_list. */
1733 static void b44_set_rx_mode(struct net_device *dev)
1735 struct b44 *bp = netdev_priv(dev);
1737 spin_lock_irq(&bp->lock);
1738 __b44_set_rx_mode(dev);
1739 spin_unlock_irq(&bp->lock);
/* ethtool msglevel accessors: plain get/set of bp->msg_enable. */
1742 static u32 b44_get_msglevel(struct net_device *dev)
1744 struct b44 *bp = netdev_priv(dev);
1745 return bp->msg_enable;
1748 static void b44_set_msglevel(struct net_device *dev, u32 value)
1750 struct b44 *bp = netdev_priv(dev);
1751 bp->msg_enable = value;
/* ethtool -i: report driver name/version and the PCI bus address. */
1754 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1756 struct b44 *bp = netdev_priv(dev);
1757 struct pci_dev *pci_dev = bp->pdev;
1759 strcpy (info->driver, DRV_MODULE_NAME);
1760 strcpy (info->version, DRV_MODULE_VERSION);
1761 strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool nway_reset: restart autonegotiation if the PHY has it enabled. */
1764 static int b44_nway_reset(struct net_device *dev)
1766 struct b44 *bp = netdev_priv(dev);
1770 spin_lock_irq(&bp->lock);
/* BMCR is read twice -- presumably to flush latched status on this PHY;
 * TODO confirm this is intentional and not a stray duplicate. */
1771 b44_readphy(bp, MII_BMCR, &bmcr);
1772 b44_readphy(bp, MII_BMCR, &bmcr);
1774 if (bmcr & BMCR_ANENABLE) {
1775 b44_writephy(bp, MII_BMCR,
1776 bmcr | BMCR_ANRESTART);
1779 spin_unlock_irq(&bp->lock);
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex and autoneg state, all derived from bp->flags. */
1784 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1786 struct b44 *bp = netdev_priv(dev);
1788 cmd->supported = (SUPPORTED_Autoneg);
1789 cmd->supported |= (SUPPORTED_100baseT_Half |
1790 SUPPORTED_100baseT_Full |
1791 SUPPORTED_10baseT_Half |
1792 SUPPORTED_10baseT_Full |
1795 cmd->advertising = 0;
1796 if (bp->flags & B44_FLAG_ADV_10HALF)
1797 cmd->advertising |= ADVERTISED_10baseT_Half;
1798 if (bp->flags & B44_FLAG_ADV_10FULL)
1799 cmd->advertising |= ADVERTISED_10baseT_Full;
1800 if (bp->flags & B44_FLAG_ADV_100HALF)
1801 cmd->advertising |= ADVERTISED_100baseT_Half;
1802 if (bp->flags & B44_FLAG_ADV_100FULL)
1803 cmd->advertising |= ADVERTISED_100baseT_Full;
1804 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
/* Current link parameters as resolved by the link-change logic. */
1805 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1806 SPEED_100 : SPEED_10;
1807 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1808 DUPLEX_FULL : DUPLEX_HALF;
1810 cmd->phy_address = bp->phy_addr;
1811 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1812 XCVR_INTERNAL : XCVR_EXTERNAL;
1813 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1814 AUTONEG_DISABLE : AUTONEG_ENABLE;
1815 if (cmd->autoneg == AUTONEG_ENABLE)
1816 cmd->advertising |= ADVERTISED_Autoneg;
/* Elided branch handles the not-running case. */
1817 if (!netif_running(dev)){
/* ethtool set_settings: validate the request (no gigabit; forced mode
 * must be 10/100 half/full), then translate it into bp->flags under the
 * lock and re-run link setup if the interface is up. */
1826 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1828 struct b44 *bp = netdev_priv(dev);
1830 /* We do not support gigabit. */
1831 if (cmd->autoneg == AUTONEG_ENABLE) {
1832 if (cmd->advertising &
1833 (ADVERTISED_1000baseT_Half |
1834 ADVERTISED_1000baseT_Full))
1836 } else if ((cmd->speed != SPEED_100 &&
1837 cmd->speed != SPEED_10) ||
1838 (cmd->duplex != DUPLEX_HALF &&
1839 cmd->duplex != DUPLEX_FULL)) {
1843 spin_lock_irq(&bp->lock);
1845 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: clear any forced-link state and rebuild the advertisement
 * mask; an empty request means advertise everything we support. */
1846 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1847 B44_FLAG_100_BASE_T |
1848 B44_FLAG_FULL_DUPLEX |
1849 B44_FLAG_ADV_10HALF |
1850 B44_FLAG_ADV_10FULL |
1851 B44_FLAG_ADV_100HALF |
1852 B44_FLAG_ADV_100FULL);
1853 if (cmd->advertising == 0) {
1854 bp->flags |= (B44_FLAG_ADV_10HALF |
1855 B44_FLAG_ADV_10FULL |
1856 B44_FLAG_ADV_100HALF |
1857 B44_FLAG_ADV_100FULL);
1859 if (cmd->advertising & ADVERTISED_10baseT_Half)
1860 bp->flags |= B44_FLAG_ADV_10HALF;
1861 if (cmd->advertising & ADVERTISED_10baseT_Full)
1862 bp->flags |= B44_FLAG_ADV_10FULL;
1863 if (cmd->advertising & ADVERTISED_100baseT_Half)
1864 bp->flags |= B44_FLAG_ADV_100HALF;
1865 if (cmd->advertising & ADVERTISED_100baseT_Full)
1866 bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced mode: record exact speed/duplex requested. */
1869 bp->flags |= B44_FLAG_FORCE_LINK;
1870 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1871 if (cmd->speed == SPEED_100)
1872 bp->flags |= B44_FLAG_100_BASE_T;
1873 if (cmd->duplex == DUPLEX_FULL)
1874 bp->flags |= B44_FLAG_FULL_DUPLEX;
/* Apply immediately only when the device is up (elided call). */
1877 if (netif_running(dev))
1880 spin_unlock_irq(&bp->lock);
/* ethtool get_ringparam: report the RX ring bounds and current depth. */
1885 static void b44_get_ringparam(struct net_device *dev,
1886 struct ethtool_ringparam *ering)
1888 struct b44 *bp = netdev_priv(dev);
1890 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1891 ering->rx_pending = bp->rx_pending;
1893 /* XXX ethtool lacks a tx_max_pending, oops... */
/* ethtool set_ringparam: validate the new depths (no mini/jumbo rings),
 * then restart the chip with the new ring sizes under the lock. */
1896 static int b44_set_ringparam(struct net_device *dev,
1897 struct ethtool_ringparam *ering)
1899 struct b44 *bp = netdev_priv(dev);
1901 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1902 (ering->rx_mini_pending != 0) ||
1903 (ering->rx_jumbo_pending != 0) ||
1904 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1907 spin_lock_irq(&bp->lock);
1909 bp->rx_pending = ering->rx_pending;
1910 bp->tx_pending = ering->tx_pending;
/* Full reset re-allocates ring usage with the new pending counts. */
1914 b44_init_hw(bp, B44_FULL_RESET);
1915 netif_wake_queue(bp->dev);
1916 spin_unlock_irq(&bp->lock);
1918 b44_enable_ints(bp);
/* ethtool get_pauseparam: report flow-control autoneg and RX/TX pause
 * state straight from bp->flags. */
1923 static void b44_get_pauseparam(struct net_device *dev,
1924 struct ethtool_pauseparam *epause)
1926 struct b44 *bp = netdev_priv(dev);
1929 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1931 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1933 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam: update the pause flags under the lock; with
 * pause-autoneg a full chip re-init renegotiates, otherwise the new
 * flow-control settings are applied directly. */
1936 static int b44_set_pauseparam(struct net_device *dev,
1937 struct ethtool_pauseparam *epause)
1939 struct b44 *bp = netdev_priv(dev);
1941 spin_lock_irq(&bp->lock);
1942 if (epause->autoneg)
1943 bp->flags |= B44_FLAG_PAUSE_AUTO;
1945 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1946 if (epause->rx_pause)
1947 bp->flags |= B44_FLAG_RX_PAUSE;
1949 bp->flags &= ~B44_FLAG_RX_PAUSE;
1950 if (epause->tx_pause)
1951 bp->flags |= B44_FLAG_TX_PAUSE;
1953 bp->flags &= ~B44_FLAG_TX_PAUSE;
1954 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1957 b44_init_hw(bp, B44_FULL_RESET);
1959 __b44_set_flow_ctrl(bp, bp->flags);
1961 spin_unlock_irq(&bp->lock);
1963 b44_enable_ints(bp);
/* ethtool get_strings: copy the flat b44_gstrings name table. */
1968 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1972 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* Number of ethtool statistics (one per gstrings entry). */
1977 static int b44_get_stats_count(struct net_device *dev)
1979 return ARRAY_SIZE(b44_gstrings);
/* ethtool get_ethtool_stats: refresh the MIB counters, then copy them
 * out -- hw_stats is treated as a flat u32 array starting at
 * tx_good_octets, in the same order as b44_gstrings. */
1982 static void b44_get_ethtool_stats(struct net_device *dev,
1983 struct ethtool_stats *stats, u64 *data)
1985 struct b44 *bp = netdev_priv(dev);
1986 u32 *val = &bp->hw_stats.tx_good_octets;
1989 spin_lock_irq(&bp->lock);
1991 b44_stats_update(bp);
1993 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1996 spin_unlock_irq(&bp->lock);
/* ethtool get_wol: only magic-packet wake is supported; report whether
 * it is currently enabled.  No SecureOn password support. */
1999 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2001 struct b44 *bp = netdev_priv(dev);
2003 wol->supported = WAKE_MAGIC;
2004 if (bp->flags & B44_FLAG_WOL_ENABLE)
2005 wol->wolopts = WAKE_MAGIC;
2008 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: latch the WAKE_MAGIC request into bp->flags; the
 * hardware is actually armed later, at close/suspend time. */
2011 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2013 struct b44 *bp = netdev_priv(dev);
2015 spin_lock_irq(&bp->lock);
2016 if (wol->wolopts & WAKE_MAGIC)
2017 bp->flags |= B44_FLAG_WOL_ENABLE;
2019 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2020 spin_unlock_irq(&bp->lock);
/* ethtool operations table wired up in b44_init_one(). */
2025 static const struct ethtool_ops b44_ethtool_ops = {
2026 .get_drvinfo = b44_get_drvinfo,
2027 .get_settings = b44_get_settings,
2028 .set_settings = b44_set_settings,
2029 .nway_reset = b44_nway_reset,
2030 .get_link = ethtool_op_get_link,
2031 .get_wol = b44_get_wol,
2032 .set_wol = b44_set_wol,
2033 .get_ringparam = b44_get_ringparam,
2034 .set_ringparam = b44_set_ringparam,
2035 .get_pauseparam = b44_get_pauseparam,
2036 .set_pauseparam = b44_set_pauseparam,
2037 .get_msglevel = b44_get_msglevel,
2038 .set_msglevel = b44_set_msglevel,
2039 .get_strings = b44_get_strings,
2040 .get_stats_count = b44_get_stats_count,
2041 .get_ethtool_stats = b44_get_ethtool_stats,
2042 .get_perm_addr = ethtool_op_get_perm_addr,
/* net_device->do_ioctl: delegate MII ioctls (SIOCGMIIPHY etc.) to the
 * generic MII layer; rejected (elided branch) when the device is down. */
2045 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2047 struct mii_ioctl_data *data = if_mii(ifr);
2048 struct b44 *bp = netdev_priv(dev);
2051 if (!netif_running(dev))
2054 spin_lock_irq(&bp->lock);
2055 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2056 spin_unlock_irq(&bp->lock);
2061 /* Read 128-bytes of EEPROM. */
/* The EEPROM shadow is memory-mapped at offset 4096 in BAR0 and read
 * 16 bits at a time.
 * NOTE(review): cpu_to_le16() on a readw() result looks suspect on
 * big-endian hosts -- confirm the intended byte order of `data`. */
2062 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2065 u16 *ptr = (u16 *) data;
2067 for (i = 0; i < 128; i += 2)
2068 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
/* Probe-time chip discovery: pull the MAC address and PHY address out of
 * the EEPROM, validate them, and record core/DMA parameters in bp. */
2073 static int __devinit b44_get_invariants(struct b44 *bp)
2078 err = b44_read_eeprom(bp, &eeprom[0]);
/* MAC address bytes are stored byte-swapped within each 16-bit EEPROM
 * word, hence the pairwise-reversed indices. */
2082 bp->dev->dev_addr[0] = eeprom[79];
2083 bp->dev->dev_addr[1] = eeprom[78];
2084 bp->dev->dev_addr[2] = eeprom[81];
2085 bp->dev->dev_addr[3] = eeprom[80];
2086 bp->dev->dev_addr[4] = eeprom[83];
2087 bp->dev->dev_addr[5] = eeprom[82];
2089 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2090 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2094 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2096 bp->phy_addr = eeprom[90] & 0x1f;
2098 /* With this, plus the rx_header prepended to the data by the
2099 * hardware, we'll land the ethernet header on a 2-byte boundary.
2103 bp->imask = IMASK_DEF;
2105 bp->core_unit = ssb_core_unit(bp);
2106 bp->dma_offset = SB_PCI_DMA;
2108 /* XXX - really required?
2109 bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* Core revision 7+ ("B0 and later") has hardware magic-packet match. */
2112 if (ssb_get_core_rev(bp) >= 7)
2113 bp->flags |= B44_FLAG_B0_ANDLATER;
/* PCI probe: enable the device, claim BAR0, set the 30-bit DMA masks,
 * allocate and populate the net_device, map registers, read chip
 * invariants, register the netdev and print the MAC address.  Errors
 * unwind through the goto labels at the bottom. */
2119 static int __devinit b44_init_one(struct pci_dev *pdev,
2120 const struct pci_device_id *ent)
2122 static int b44_version_printed = 0;
2123 unsigned long b44reg_base, b44reg_len;
2124 struct net_device *dev;
/* Print the version banner once, on the first probed device. */
2128 if (b44_version_printed++ == 0)
2129 printk(KERN_INFO "%s", version);
2131 err = pci_enable_device(pdev);
2133 dev_err(&pdev->dev, "Cannot enable PCI device, "
2138 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2140 "Cannot find proper PCI device "
2141 "base address, aborting.\n");
2143 goto err_out_disable_pdev;
2146 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2149 "Cannot obtain PCI resources, aborting.\n");
2150 goto err_out_disable_pdev;
2153 pci_set_master(pdev);
/* The 4400 DMA engine only addresses 30 bits (B44_DMA_MASK). */
2155 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2157 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2158 goto err_out_free_res;
2161 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2163 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2164 goto err_out_free_res;
2167 b44reg_base = pci_resource_start(pdev, 0);
2168 b44reg_len = pci_resource_len(pdev, 0);
2170 dev = alloc_etherdev(sizeof(*bp));
2172 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2174 goto err_out_free_res;
2177 SET_MODULE_OWNER(dev);
2178 SET_NETDEV_DEV(dev,&pdev->dev);
2180 /* No interesting netdevice features in this card... */
2183 bp = netdev_priv(dev);
2187 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2189 spin_lock_init(&bp->lock);
2191 bp->regs = ioremap(b44reg_base, b44reg_len);
2192 if (bp->regs == 0UL) {
2193 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2195 goto err_out_free_dev;
2198 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2199 bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Wire up the net_device operations (pre-net_device_ops kernel style). */
2201 dev->open = b44_open;
2202 dev->stop = b44_close;
2203 dev->hard_start_xmit = b44_start_xmit;
2204 dev->get_stats = b44_get_stats;
2205 dev->set_multicast_list = b44_set_rx_mode;
2206 dev->set_mac_address = b44_set_mac_addr;
2207 dev->do_ioctl = b44_ioctl;
2208 dev->tx_timeout = b44_tx_timeout;
2209 dev->poll = b44_poll;
2211 dev->watchdog_timeo = B44_TX_TIMEOUT;
2212 #ifdef CONFIG_NET_POLL_CONTROLLER
2213 dev->poll_controller = b44_poll_controller;
2215 dev->change_mtu = b44_change_mtu;
2216 dev->irq = pdev->irq;
2217 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2219 netif_carrier_off(dev);
2221 err = b44_get_invariants(bp);
2224 "Problem fetching invariants of chip, aborting.\n");
2225 goto err_out_iounmap;
/* MII library glue for generic_mii_ioctl(). */
2228 bp->mii_if.dev = dev;
2229 bp->mii_if.mdio_read = b44_mii_read;
2230 bp->mii_if.mdio_write = b44_mii_write;
2231 bp->mii_if.phy_id = bp->phy_addr;
2232 bp->mii_if.phy_id_mask = 0x1f;
2233 bp->mii_if.reg_num_mask = 0x1f;
2235 /* By default, advertise all speed/duplex settings. */
2236 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2237 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2239 /* By default, auto-negotiate PAUSE. */
2240 bp->flags |= B44_FLAG_PAUSE_AUTO;
2242 err = register_netdev(dev);
2244 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2245 goto err_out_iounmap;
2248 pci_set_drvdata(pdev, dev);
/* Saved config state is restored in b44_resume(). */
2250 pci_save_state(bp->pdev);
2252 /* Chip reset provides power to the b44 MAC & PCI cores, which
2253 * is necessary for MAC register access.
2257 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2258 for (i = 0; i < 6; i++)
2259 printk("%2.2x%c", dev->dev_addr[i],
2260 i == 5 ? '\n' : ':');
/* Error-unwind tail (earlier labels elided from this chunk). */
2271 pci_release_regions(pdev);
2273 err_out_disable_pdev:
2274 pci_disable_device(pdev);
2275 pci_set_drvdata(pdev, NULL);
/* PCI remove: unregister the netdev, then (elided: iounmap/free_netdev)
 * release the PCI resources and clear the drvdata pointer. */
2279 static void __devexit b44_remove_one(struct pci_dev *pdev)
2281 struct net_device *dev = pci_get_drvdata(pdev);
2282 struct b44 *bp = netdev_priv(dev);
2284 unregister_netdev(dev);
2287 pci_release_regions(pdev);
2288 pci_disable_device(pdev);
2289 pci_set_drvdata(pdev, NULL);
/* PM suspend: if the interface is up, stop the timer, halt the chip and
 * detach the netdev under the lock, drop the IRQ, then either arm WOL
 * (partial re-init) or leave the chip down, and power off the device. */
2292 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2294 struct net_device *dev = pci_get_drvdata(pdev);
2295 struct b44 *bp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never opened. */
2297 if (!netif_running(dev))
2300 del_timer_sync(&bp->timer);
2302 spin_lock_irq(&bp->lock);
2305 netif_carrier_off(bp->dev);
2306 netif_device_detach(bp->dev);
2309 spin_unlock_irq(&bp->lock);
2311 free_irq(dev->irq, dev);
2312 if (bp->flags & B44_FLAG_WOL_ENABLE) {
/* Keep RX DMA alive so the WOL pattern filters can fire. */
2313 b44_init_hw(bp, B44_PARTIAL_RESET);
2316 pci_disable_device(pdev);
/* PM resume: restore PCI config and re-enable the device; if the
 * interface was up, re-acquire the IRQ, fully re-init the chip under
 * the lock, reattach the netdev and restart the timer/queue. */
2320 static int b44_resume(struct pci_dev *pdev)
2322 struct net_device *dev = pci_get_drvdata(pdev);
2323 struct b44 *bp = netdev_priv(dev);
2326 pci_restore_state(pdev);
2327 rc = pci_enable_device(pdev);
2329 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2334 pci_set_master(pdev);
2336 if (!netif_running(dev))
2339 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2341 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2342 pci_disable_device(pdev);
2346 spin_lock_irq(&bp->lock);
2349 b44_init_hw(bp, B44_FULL_RESET);
2350 netif_device_attach(bp->dev);
2351 spin_unlock_irq(&bp->lock);
/* Restart the one-second PHY poll timer stopped in b44_suspend(). */
2353 bp->timer.expires = jiffies + HZ;
2354 add_timer(&bp->timer);
2356 b44_enable_ints(bp);
2357 netif_wake_queue(dev);
/* PCI driver registration glue. */
2361 static struct pci_driver b44_driver = {
2362 .name = DRV_MODULE_NAME,
2363 .id_table = b44_pci_tbl,
2364 .probe = b44_init_one,
2365 .remove = __devexit_p(b44_remove_one),
2366 .suspend = b44_suspend,
2367 .resume = b44_resume,
/* Module init: compute DMA descriptor sync/alignment parameters from the
 * CPU cache line size, then register the PCI driver. */
2370 static int __init b44_init(void)
2372 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2374 /* Setup paramaters for syncing RX/TX DMA descriptors */
2375 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2376 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2378 return pci_register_driver(&b44_driver);
2381 static void __exit b44_cleanup(void)
2383 pci_unregister_driver(&b44_driver);
2386 module_init(b44_init);
2387 module_exit(b44_cleanup);